shared_ptr<Playlist> playlist (new Playlist);
playlist->add (content);
shared_ptr<Player> player (new Player (_film, playlist));
- player->disable_video ();
- player->Audio.connect (bind (&AnalyseAudioJob::audio, this, _1, _2));
-
int64_t const len = _film->length().frames (_film->audio_frame_rate());
_samples_per_point = max (int64_t (1), len / _num_points);
_analysis.reset (new AudioAnalysis (_film->audio_channels ()));
_done = 0;
- while (!player->pass ()) {
- set_progress (double (_done) / len);
+ DCPTime const block = DCPTime::from_seconds (1.0 / 8);
+ for (DCPTime t; t < _film->length(); t += block) {
+ analyse (player->get_audio (t, block, false));
+ set_progress (t.seconds() / _film->length().seconds());
}
_analysis->write (content->audio_analysis_path ());
}
void
-AnalyseAudioJob::audio (shared_ptr<const AudioBuffers> b, DCPTime)
+AnalyseAudioJob::analyse (shared_ptr<const AudioBuffers> b)
{
for (int i = 0; i < b->frames(); ++i) {
for (int j = 0; j < b->channels(); ++j) {
void run ();
private:
- void audio (boost::shared_ptr<const AudioBuffers>, DCPTime);
+ void analyse (boost::shared_ptr<const AudioBuffers>);
boost::weak_ptr<AudioContent> _content;
int64_t _done;
/*
- Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
#include "log.h"
#include "resampler.h"
#include "util.h"
+#include "film.h"
#include "i18n.h"
}
}
-/** Audio timestamping is made hard by many factors, but the final nail in the coffin is resampling.
+shared_ptr<ContentAudio>
+AudioDecoder::get_audio (AudioFrame frame, AudioFrame length, bool accurate)
+{
+ shared_ptr<ContentAudio> dec;
+
+ AudioFrame const end = frame + length - 1;
+
+ if (frame < _decoded_audio.frame || end > (_decoded_audio.frame + length * 4)) {
+ /* Either we have no decoded data, or what we do have is a long way from what we want: seek */
+ seek (ContentTime::from_frames (frame, _audio_content->content_audio_frame_rate()), accurate);
+ }
+
+ /* Now enough pass() calls will either:
+ * (a) give us what we want, or
+ * (b) hit the end of the decoder.
+ *
+ * If we are being accurate, we want the right frames,
+ * otherwise any frames will do.
+ */
+ if (accurate) {
+ while (!pass() && _decoded_audio.audio->frames() < length) {}
+ } else {
+ while (!pass() && (_decoded_audio.frame > frame || (_decoded_audio.frame + _decoded_audio.audio->frames()) < end)) {}
+ }
+
+ /* Clean up decoded */
+
+ AudioFrame const decoded_offset = frame - _decoded_audio.frame;
+ AudioFrame const amount_left = _decoded_audio.audio->frames() - decoded_offset;
+ _decoded_audio.audio->move (decoded_offset, 0, amount_left);
+ _decoded_audio.audio->set_frames (amount_left);
+
+ shared_ptr<AudioBuffers> out (new AudioBuffers (_decoded_audio.audio->channels(), length));
+ out->copy_from (_decoded_audio.audio.get(), length, frame - _decoded_audio.frame, 0);
+
+ return shared_ptr<ContentAudio> (new ContentAudio (out, frame));
+}
+
+/** Called by subclasses when audio data is ready.
+ *
+ * Audio timestamping is made hard by many factors, but perhaps the most entertaining is resampling.
* We have to assume that we are feeding continuous data into the resampler, and so we get continuous
* data out. Hence we do the timestamping here, post-resampler, just by counting samples.
*
}
if (!_audio_position) {
- _audio_position = time;
+ _audio_position = time.frames (_audio_content->output_audio_frame_rate ());
}
- _pending.push_back (shared_ptr<DecodedAudio> (new DecodedAudio (_audio_position.get (), data)));
- _audio_position = _audio_position.get() + ContentTime (data->frames (), _audio_content->output_audio_frame_rate ());
+ assert (_audio_position >= (_decoded_audio.frame + _decoded_audio.audio->frames()));
+
+ /* Resize _decoded_audio to fit the new data */
+ _decoded_audio.audio->ensure_size (_audio_position.get() + data->frames() - _decoded_audio.frame);
+
+ /* Copy new data in */
+ _decoded_audio.audio->copy_from (data.get(), data->frames(), 0, _audio_position.get() - _decoded_audio.frame);
+ _audio_position = _audio_position.get() + data->frames ();
}
+/* XXX: called? */
void
AudioDecoder::flush ()
{
return;
}
+ /*
shared_ptr<const AudioBuffers> b = _resampler->flush ();
if (b) {
- _pending.push_back (shared_ptr<DecodedAudio> (new DecodedAudio (_audio_position.get (), b)));
- _audio_position = _audio_position.get() + ContentTime (b->frames (), _audio_content->output_audio_frame_rate ());
+ _pending.push_back (shared_ptr<DecodedAudio> (new DecodedAudio (b, _audio_position.get ())));
+ _audio_position = _audio_position.get() + b->frames ();
}
+ */
}
void
/*
- Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
#include "decoder.h"
#include "content.h"
#include "audio_content.h"
-#include "decoded.h"
+#include "content_audio.h"
class AudioBuffers;
class Resampler;
return _audio_content;
}
- void seek (ContentTime time, bool accurate);
+ /** Try to fetch some audio from a specific place in this content.
+ * @param frame Frame to start from.
+ * @param length Frames to get.
+ * @param accurate true to try hard to return frames from exactly `frame', false if we don't mind nearby frames.
+ * @return Time-stamped audio data which may or may not be from the location (and of the length) requested.
+ */
+ boost::shared_ptr<ContentAudio> get_audio (AudioFrame time, AudioFrame length, bool accurate);
protected:
+ void seek (ContentTime time, bool accurate);
void audio (boost::shared_ptr<const AudioBuffers>, ContentTime);
void flush ();
boost::shared_ptr<const AudioContent> _audio_content;
boost::shared_ptr<Resampler> _resampler;
- boost::optional<ContentTime> _audio_position;
+ boost::optional<AudioFrame> _audio_position;
+ /** Currently-available decoded audio data */
+ ContentAudio _decoded_audio;
};
#endif
+++ /dev/null
-/*
- Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
-
-#include "audio_buffers.h"
-#include "audio_merger.h"
-
-using std::min;
-using std::max;
-using boost::shared_ptr;
-
-AudioMerger::AudioMerger (int channels, int frame_rate)
- : _buffers (new AudioBuffers (channels, 0))
- , _frame_rate (frame_rate)
- , _last_pull (0)
-{
-
-}
-
-
-TimedAudioBuffers
-AudioMerger::pull (DCPTime time)
-{
- assert (time >= _last_pull);
-
- TimedAudioBuffers out;
-
- int64_t const to_return = DCPTime (time - _last_pull).frames (_frame_rate);
- out.audio.reset (new AudioBuffers (_buffers->channels(), to_return));
- /* And this is how many we will get from our buffer */
- int64_t const to_return_from_buffers = min (to_return, int64_t (_buffers->frames ()));
-
- /* Copy the data that we have to the back end of the return buffer */
- out.audio->copy_from (_buffers.get(), to_return_from_buffers, 0, to_return - to_return_from_buffers);
- /* Silence any gap at the start */
- out.audio->make_silent (0, to_return - to_return_from_buffers);
-
- out.time = _last_pull;
- _last_pull = time;
-
- /* And remove the data we're returning from our buffers */
- if (_buffers->frames() > to_return_from_buffers) {
- _buffers->move (to_return_from_buffers, 0, _buffers->frames() - to_return_from_buffers);
- }
- _buffers->set_frames (_buffers->frames() - to_return_from_buffers);
-
- return out;
-}
-
-void
-AudioMerger::push (shared_ptr<const AudioBuffers> audio, DCPTime time)
-{
- assert (time >= _last_pull);
-
- int64_t frame = time.frames (_frame_rate);
- int64_t after = max (int64_t (_buffers->frames()), frame + audio->frames() - _last_pull.frames (_frame_rate));
- _buffers->ensure_size (after);
- _buffers->accumulate_frames (audio.get(), 0, frame - _last_pull.frames (_frame_rate), audio->frames ());
- _buffers->set_frames (after);
-}
-
-TimedAudioBuffers
-AudioMerger::flush ()
-{
- if (_buffers->frames() == 0) {
- return TimedAudioBuffers ();
- }
-
- return TimedAudioBuffers (_buffers, _last_pull);
-}
-
-void
-AudioMerger::clear (DCPTime t)
-{
- _last_pull = t;
- _buffers.reset (new AudioBuffers (_buffers->channels(), 0));
-}
+++ /dev/null
-/*
- Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
-
-#include "util.h"
-
-class AudioBuffers;
-
-class AudioMerger
-{
-public:
- AudioMerger (int channels, int frame_rate);
-
- /** Pull audio up to a given time; after this call, no more data can be pushed
- * before the specified time.
- */
- TimedAudioBuffers pull (DCPTime time);
- void push (boost::shared_ptr<const AudioBuffers> audio, DCPTime time);
- TimedAudioBuffers flush ();
- void clear (DCPTime t);
-
-private:
- boost::shared_ptr<AudioBuffers> _buffers;
- int _frame_rate;
- DCPTime _last_pull;
-};
static ContentTime from_frames (int64_t f, T r) {
return ContentTime (f * HZ / r);
}
+
+ static ContentTime max () {
+ return ContentTime (INT64_MAX);
+ }
};
std::ostream& operator<< (std::ostream& s, ContentTime t);
+++ /dev/null
-/*
- Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
-
-#ifndef DCPOMATIC_LIB_DECODED_H
-#define DCPOMATIC_LIB_DECODED_H
-
-#include <dcp/subtitle_string.h>
-#include "types.h"
-#include "rect.h"
-#include "util.h"
-
-class Image;
-
-class Decoded
-{
-public:
- Decoded ()
- : content_time (0)
- , dcp_time (0)
- {}
-
- Decoded (ContentTime t)
- : content_time (t)
- , dcp_time (0)
- {}
-
- virtual ~Decoded () {}
-
- virtual void set_dcp_times (FrameRateChange frc, DCPTime offset)
- {
- dcp_time = DCPTime (content_time, frc) + offset;
- }
-
- ContentTime content_time;
- DCPTime dcp_time;
-};
-
-/** One frame of video from a VideoDecoder */
-class DecodedVideo : public Decoded
-{
-public:
- DecodedVideo ()
- : eyes (EYES_BOTH)
- , same (false)
- {}
-
- DecodedVideo (ContentTime t, boost::shared_ptr<const Image> im, Eyes e, bool s)
- : Decoded (t)
- , image (im)
- , eyes (e)
- , same (s)
- {}
-
- boost::shared_ptr<const Image> image;
- Eyes eyes;
- bool same;
-};
-
-class DecodedAudio : public Decoded
-{
-public:
- DecodedAudio (ContentTime t, boost::shared_ptr<const AudioBuffers> d)
- : Decoded (t)
- , data (d)
- {}
-
- boost::shared_ptr<const AudioBuffers> data;
-};
-
-class DecodedImageSubtitle : public Decoded
-{
-public:
- DecodedImageSubtitle ()
- : content_time_to (0)
- , dcp_time_to (0)
- {}
-
- DecodedImageSubtitle (ContentTime f, ContentTime t, boost::shared_ptr<Image> im, dcpomatic::Rect<double> r)
- : Decoded (f)
- , content_time_to (t)
- , dcp_time_to (0)
- , image (im)
- , rect (r)
- {}
-
- void set_dcp_times (FrameRateChange frc, DCPTime offset)
- {
- Decoded::set_dcp_times (frc, offset);
- dcp_time_to = DCPTime (content_time_to, frc) + offset;
- }
-
- ContentTime content_time_to;
- DCPTime dcp_time_to;
- boost::shared_ptr<Image> image;
- dcpomatic::Rect<double> rect;
-};
-
-class DecodedTextSubtitle : public Decoded
-{
-public:
- DecodedTextSubtitle ()
- : content_time_to (0)
- , dcp_time_to (0)
- {}
-
- /* Assuming that all subs are at the same time */
- DecodedTextSubtitle (std::list<dcp::SubtitleString> s)
- : Decoded (ContentTime::from_seconds (s.front().in().to_ticks() * 4 / 1000.0))
- , content_time_to (ContentTime::from_seconds (s.front().out().to_ticks() * 4 / 1000.0))
- , subs (s)
- {
-
- }
-
- void set_dcp_times (FrameRateChange frc, DCPTime offset)
- {
- Decoded::set_dcp_times (frc, offset);
- dcp_time_to = DCPTime (content_time_to, frc) + offset;
- }
-
- ContentTime content_time_to;
- DCPTime dcp_time_to;
- std::list<dcp::SubtitleString> subs;
-};
-
-#endif
+++ /dev/null
-/*
- Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
-
-/** @file src/decoder.cc
- * @brief Parent class for decoders of content.
- */
-
-#include "decoder.h"
-#include "decoded.h"
-
-#include "i18n.h"
-
-using std::cout;
-using boost::shared_ptr;
-
-/** @param o Decode options.
- */
-Decoder::Decoder ()
- : _done (false)
-{
-
-}
-
-struct DecodedSorter
-{
- bool operator() (shared_ptr<Decoded> a, shared_ptr<Decoded> b)
- {
- return a->dcp_time < b->dcp_time;
- }
-};
-
-shared_ptr<Decoded>
-Decoder::peek ()
-{
- while (!_done && _pending.empty ()) {
- _done = pass ();
- }
-
- if (_done && _pending.empty ()) {
- return shared_ptr<Decoded> ();
- }
-
- _pending.sort (DecodedSorter ());
- return _pending.front ();
-}
-
-void
-Decoder::consume ()
-{
- if (!_pending.empty ()) {
- _pending.pop_front ();
- }
-}
-
-void
-Decoder::seek (ContentTime, bool)
-{
- _pending.clear ();
- _done = false;
-}
#include "dcpomatic_time.h"
class Decoded;
+class Film;
/** @class Decoder.
* @brief Parent class for decoders of content.
class Decoder : public boost::noncopyable
{
public:
- Decoder ();
virtual ~Decoder () {}
+protected:
/** Seek so that the next peek() will yield the next thing
* (video/sound frame, subtitle etc.) at or after the requested
* time. Pass accurate = true to try harder to get close to
* the request.
*/
- virtual void seek (ContentTime time, bool accurate);
-
- boost::shared_ptr<Decoded> peek ();
-
- /* Consume the last peek()ed thing so that it won't be returned
- * from the next peek().
- */
- void consume ();
-
-protected:
-
- /** Perform one decode pass of the content, which may or may not
- * result in a complete quantum (Decoded object) of decoded stuff
- * being added to _pending.
- * @return true if the decoder is done (i.e. no more data will be
- * produced by any future calls to pass() without a seek() first).
- */
+ virtual void seek (ContentTime time, bool accurate) = 0;
virtual bool pass () = 0;
- virtual void flush () {};
-
- std::list<boost::shared_ptr<Decoded> > _pending;
- bool _done;
};
#endif
#include "writer.h"
#include "server_finder.h"
#include "player.h"
+#include "dcp_video.h"
#include "i18n.h"
, _video_frames_out (0)
, _terminate (false)
{
- _have_a_real_frame[EYES_BOTH] = false;
- _have_a_real_frame[EYES_LEFT] = false;
- _have_a_real_frame[EYES_RIGHT] = false;
+
}
Encoder::~Encoder ()
}
void
-Encoder::process_video (shared_ptr<PlayerImage> image, Eyes eyes, ColourConversion conversion, bool same)
+Encoder::process_video (shared_ptr<DCPVideo> frame)
{
_waker.nudge ();
rethrow ();
if (_writer->can_fake_write (_video_frames_out)) {
- _writer->fake_write (_video_frames_out, eyes);
- _have_a_real_frame[eyes] = false;
- frame_done ();
- } else if (same && _have_a_real_frame[eyes]) {
- /* Use the last frame that we encoded. */
- _writer->repeat (_video_frames_out, eyes);
+ _writer->fake_write (_video_frames_out, frame->eyes ());
frame_done ();
} else {
/* Queue this new frame for encoding */
TIMING ("adding to queue of %1", _queue.size ());
_queue.push_back (shared_ptr<DCPVideoFrame> (
new DCPVideoFrame (
- image->image(PIX_FMT_RGB24, false), _video_frames_out, eyes, conversion, _film->video_frame_rate(),
- _film->j2k_bandwidth(), _film->resolution(), _film->log()
+ frame->image(PIX_FMT_RGB24, false),
+ _video_frames_out,
+ frame->eyes(),
+ frame->conversion(),
+ _film->video_frame_rate(),
+ _film->j2k_bandwidth(),
+ _film->resolution(),
+ _film->log()
)
));
_condition.notify_all ();
- _have_a_real_frame[eyes] = true;
}
- if (eyes != EYES_LEFT) {
+ if (frame->eyes() != EYES_LEFT) {
++_video_frames_out;
}
}
class Writer;
class Job;
class ServerFinder;
-class PlayerImage;
+class DCPVideo;
/** @class Encoder
* @brief Encoder to J2K and WAV for DCP.
void process_begin ();
/** Call with a frame of video.
- * @param i Video frame image.
- * @param same true if i is the same as the last time we were called.
+ * @param f Video frame.
*/
- void process_video (boost::shared_ptr<PlayerImage> i, Eyes eyes, ColourConversion, bool same);
+ void process_video (boost::shared_ptr<DCPVideo> f);
/** Call with some audio data */
void process_audio (boost::shared_ptr<const AudioBuffers>);
/** Number of video frames written for the DCP so far */
int _video_frames_out;
- bool _have_a_real_frame[EYES_COUNT];
bool _terminate;
std::list<boost::shared_ptr<DCPVideoFrame> > _queue;
std::list<boost::thread *> _threads;
using boost::dynamic_pointer_cast;
using dcp::Size;
-FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log, bool video, bool audio, bool subtitles)
+FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log)
: VideoDecoder (c)
, AudioDecoder (c)
, FFmpeg (c)
, _log (log)
, _subtitle_codec_context (0)
, _subtitle_codec (0)
- , _decode_video (video)
- , _decode_audio (audio)
- , _decode_subtitles (subtitles)
- , _pts_offset (0)
{
setup_subtitle ();
We will do pts_to_use = pts_from_ffmpeg + pts_offset;
*/
- bool const have_video = video && c->first_video();
- bool const have_audio = _decode_audio && c->audio_stream () && c->audio_stream()->first_audio;
+ bool const have_video = c->first_video();
+ bool const have_audio = c->audio_stream () && c->audio_stream()->first_audio;
/* First, make one of them start at 0 */
/* XXX: should we reset _packet.data and size after each *_decode_* call? */
- if (_decode_video) {
- while (decode_video_packet ()) {}
- }
+ while (decode_video_packet ()) {}
- if (_ffmpeg_content->audio_stream() && _decode_audio) {
+ if (_ffmpeg_content->audio_stream()) {
decode_audio_packet ();
AudioDecoder::flush ();
}
int const si = _packet.stream_index;
- if (si == _video_stream && _decode_video) {
+ if (si == _video_stream) {
decode_video_packet ();
- } else if (_ffmpeg_content->audio_stream() && _ffmpeg_content->audio_stream()->uses_index (_format_context, si) && _decode_audio) {
+ } else if (_ffmpeg_content->audio_stream() && _ffmpeg_content->audio_stream()->uses_index (_format_context, si)) {
decode_audio_packet ();
- } else if (_ffmpeg_content->subtitle_stream() && _ffmpeg_content->subtitle_stream()->uses_index (_format_context, si) && _decode_subtitles) {
+ } else if (_ffmpeg_content->subtitle_stream() && _ffmpeg_content->subtitle_stream()->uses_index (_format_context, si)) {
decode_subtitle_packet ();
}
void
FFmpegDecoder::seek (ContentTime time, bool accurate)
{
- Decoder::seek (time, accurate);
- if (_decode_audio) {
- AudioDecoder::seek (time, accurate);
- }
+ VideoDecoder::seek (time, accurate);
+ AudioDecoder::seek (time, accurate);
/* If we are doing an accurate seek, our initial shot will be 200ms (200 being
a number plucked from the air) earlier than we want to end up. The loop below
shared_ptr<Image> image = i->first;
if (i->second != AV_NOPTS_VALUE) {
- video (image, false, ContentTime::from_seconds (i->second * av_q2d (_format_context->streams[_video_stream]->time_base)) + _pts_offset);
+ double const pts = i->second * av_q2d (_format_context->streams[_video_stream]->time_base) + _pts_offset.seconds ();
+ video (image, rint (pts * _ffmpeg_content->video_frame_rate ()));
} else {
_log->log ("Dropping frame without PTS");
}
indicate that the previous subtitle should stop.
*/
if (sub.num_rects <= 0) {
- image_subtitle (shared_ptr<Image> (), dcpomatic::Rect<double> (), ContentTime (), ContentTime ());
+ image_subtitle (ContentTime (), ContentTime (), shared_ptr<Image> (), dcpomatic::Rect<double> ());
return;
} else if (sub.num_rects > 1) {
throw DecodeError (_("multi-part subtitles not yet supported"));
dcp::Size const vs = _ffmpeg_content->video_size ();
image_subtitle (
+ from,
+ to,
image,
dcpomatic::Rect<double> (
static_cast<double> (rect->x) / vs.width,
static_cast<double> (rect->y) / vs.height,
static_cast<double> (rect->w) / vs.width,
static_cast<double> (rect->h) / vs.height
- ),
- from,
- to
+ )
);
-
avsubtitle_free (&sub);
}
/*
- Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
class FFmpegDecoder : public VideoDecoder, public AudioDecoder, public SubtitleDecoder, public FFmpeg
{
public:
- FFmpegDecoder (boost::shared_ptr<const FFmpegContent>, boost::shared_ptr<Log>, bool video, bool audio, bool subtitles);
+ FFmpegDecoder (boost::shared_ptr<const FFmpegContent>, boost::shared_ptr<Log>);
~FFmpegDecoder ();
- void seek (ContentTime time, bool);
-
private:
friend class ::ffmpeg_pts_offset_test;
+ void seek (ContentTime time, bool);
bool pass ();
void flush ();
std::list<boost::shared_ptr<FilterGraph> > _filter_graphs;
boost::mutex _filter_graphs_mutex;
- bool _decode_video;
- bool _decode_audio;
- bool _decode_subtitles;
-
ContentTime _pts_offset;
};
#include "exceptions.h"
#include "scaler.h"
#include "timer.h"
+#include "rect.h"
#include "i18n.h"
using std::min;
using std::cout;
using std::cerr;
+using std::list;
using boost::shared_ptr;
using dcp::Size;
}
}
+void
+Image::make_transparent ()
+{
+ if (_pixel_format != PIX_FMT_RGBA) {
+ throw PixelFormatError ("make_transparent()", _pixel_format);
+ }
+
+ memset (data()[0], 0, lines(0) * stride()[0]);
+}
+
void
Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
{
return _aligned;
}
+PositionImage
+merge (list<PositionImage> images)
+{
+ if (images.empty ()) {
+ return PositionImage ();
+ }
+
+ dcpomatic::Rect<int> all (images.front().position, images.front().image->size().width, images.front().image->size().height);
+ for (list<PositionImage>::const_iterator i = images.begin(); i != images.end(); ++i) {
+ all.extend (dcpomatic::Rect<int> (i->position, i->image->size().width, i->image->size().height));
+ }
+
+ shared_ptr<Image> merged (new Image (images.front().image->pixel_format (), dcp::Size (all.width, all.height), true));
+ merged->make_transparent ();
+ for (list<PositionImage>::const_iterator i = images.begin(); i != images.end(); ++i) {
+ merged->alpha_blend (i->image, i->position);
+ }
+
+ return PositionImage (merged, all.position ());
+}
#include <dcp/image.h>
#include "util.h"
#include "position.h"
+#include "position_image.h"
class Scaler;
boost::shared_ptr<Image> crop_scale_window (Crop c, dcp::Size, dcp::Size, Scaler const *, AVPixelFormat, bool aligned) const;
void make_black ();
+ void make_transparent ();
void alpha_blend (boost::shared_ptr<const Image> image, Position<int> pos);
void copy (boost::shared_ptr<const Image> image, Position<int> pos);
bool _aligned;
};
+extern PositionImage merge (std::list<PositionImage> images);
+
#endif
bool
ImageDecoder::pass ()
{
- if (_video_position >= _image_content->video_length ()) {
+ if (_video_position >= _image_content->video_length().frames (_image_content->video_frame_rate ())) {
return true;
}
if (_image && _image_content->still ()) {
- video (_image, true, _video_position);
- _video_position += ContentTime::from_frames (1, _image_content->video_frame_rate ());
+ video (_image, _video_position);
+ ++_video_position;
return false;
}
Magick::Image* magick_image = 0;
- boost::filesystem::path const path = _image_content->path (
- _image_content->still() ? 0 : _video_position.frames (_image_content->video_frame_rate ())
- );
+ boost::filesystem::path const path = _image_content->path (_image_content->still() ? 0 : _video_position);
try {
magick_image = new Magick::Image (path.string ());
delete magick_image;
- video (_image, false, _video_position);
- _video_position += ContentTime::from_frames (1, _image_content->video_frame_rate ());
+ video (_image, _video_position);
+ ++_video_position;
return false;
}
void
ImageDecoder::seek (ContentTime time, bool accurate)
{
- Decoder::seek (time, accurate);
- _video_position = time;
+ VideoDecoder::seek (time, accurate);
+ _video_position = time.frames (_image_content->video_frame_rate ());
}
class ImageDecoder : public VideoDecoder
{
public:
- ImageDecoder (boost::shared_ptr<const ImageContent>);
+ ImageDecoder (boost::shared_ptr<const ImageContent> c);
boost::shared_ptr<const ImageContent> content () {
return _image_content;
boost::shared_ptr<const ImageContent> _image_content;
boost::shared_ptr<Image> _image;
- ContentTime _video_position;
+ VideoFrame _video_position;
};
/*
- Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
#include "log.h"
#include "scaler.h"
#include "render_subtitles.h"
+#include "dcp_video.h"
+#include "config.h"
+#include "content_video.h"
using std::list;
using std::cout;
using std::min;
using std::max;
+using std::min;
using std::vector;
using std::pair;
using std::map;
+using std::make_pair;
using boost::shared_ptr;
using boost::weak_ptr;
using boost::dynamic_pointer_cast;
using boost::optional;
-class Piece
-{
-public:
- Piece (shared_ptr<Content> c, shared_ptr<Decoder> d, FrameRateChange f)
- : content (c)
- , decoder (d)
- , frc (f)
- {}
-
- shared_ptr<Content> content;
- shared_ptr<Decoder> decoder;
- FrameRateChange frc;
-};
-
Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
	: _film (f)
	, _playlist (p)
- , _video (true)
- , _audio (true)
	, _have_valid_pieces (false)
- , _video_position (0)
- , _audio_position (0)
- , _audio_merger (f->audio_channels(), f->audio_frame_rate ())
- , _last_emit_was_black (false)
- , _just_did_inaccurate_seek (false)
	, _approximate_size (false)
+ , _burn_subtitles (false)
+ /* The push-model state (positions, merger, seek flags) is gone: the
+ player is now pull-based via get_video()/get_audio().
+ */
{
	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
	set_video_container_size (_film->frame_size ());
}
-void
-Player::disable_video ()
-{
- _video = false;
-}
-
-void
-Player::disable_audio ()
-{
- _audio = false;
-}
-
-bool
-Player::pass ()
-{
- if (!_have_valid_pieces) {
- setup_pieces ();
- }
-
- /* Interrogate all our pieces to find the one with the earliest decoded data */
-
- shared_ptr<Piece> earliest_piece;
- shared_ptr<Decoded> earliest_decoded;
- DCPTime earliest_time = DCPTime::max ();
- DCPTime earliest_audio = DCPTime::max ();
-
- for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
-
- DCPTime const offset = (*i)->content->position() - (*i)->content->trim_start();
-
- bool done = false;
- shared_ptr<Decoded> dec;
- while (!done) {
- dec = (*i)->decoder->peek ();
- if (!dec) {
- /* Decoder has nothing else to give us */
- break;
- }
-
- dec->set_dcp_times ((*i)->frc, offset);
- DCPTime const t = dec->dcp_time - offset;
- if (t >= ((*i)->content->full_length() - (*i)->content->trim_end ())) {
- /* In the end-trimmed part; decoder has nothing else to give us */
- dec.reset ();
- done = true;
- } else if (t >= (*i)->content->trim_start ()) {
- /* Within the un-trimmed part; everything's ok */
- done = true;
- } else {
- /* Within the start-trimmed part; get something else */
- (*i)->decoder->consume ();
- }
- }
-
- if (!dec) {
- continue;
- }
-
- if (dec->dcp_time < earliest_time) {
- earliest_piece = *i;
- earliest_decoded = dec;
- earliest_time = dec->dcp_time;
- }
-
- if (dynamic_pointer_cast<DecodedAudio> (dec) && dec->dcp_time < earliest_audio) {
- earliest_audio = dec->dcp_time;
- }
- }
-
- if (!earliest_piece) {
- flush ();
- return true;
- }
-
- if (earliest_audio != DCPTime::max ()) {
- if (earliest_audio.get() < 0) {
- earliest_audio = DCPTime ();
- }
- TimedAudioBuffers tb = _audio_merger.pull (earliest_audio);
- Audio (tb.audio, tb.time);
- /* This assumes that the audio-frames-to-time conversion is exact
- so that there are no accumulated errors caused by rounding.
- */
- _audio_position += DCPTime::from_frames (tb.audio->frames(), _film->audio_frame_rate ());
- }
-
- /* Emit the earliest thing */
-
- shared_ptr<DecodedVideo> dv = dynamic_pointer_cast<DecodedVideo> (earliest_decoded);
- shared_ptr<DecodedAudio> da = dynamic_pointer_cast<DecodedAudio> (earliest_decoded);
- shared_ptr<DecodedImageSubtitle> dis = dynamic_pointer_cast<DecodedImageSubtitle> (earliest_decoded);
- shared_ptr<DecodedTextSubtitle> dts = dynamic_pointer_cast<DecodedTextSubtitle> (earliest_decoded);
-
- /* Will be set to false if we shouldn't consume the peeked DecodedThing */
- bool consume = true;
-
- if (dv && _video) {
-
- if (_just_did_inaccurate_seek) {
-
- /* Just emit; no subtlety */
- emit_video (earliest_piece, dv);
- step_video_position (dv);
-
- } else if (dv->dcp_time > _video_position) {
-
- /* Too far ahead */
-
- list<shared_ptr<Piece> >::iterator i = _pieces.begin();
- while (i != _pieces.end() && ((*i)->content->position() >= _video_position || _video_position >= (*i)->content->end())) {
- ++i;
- }
-
- if (i == _pieces.end() || !_last_incoming_video.video || !_have_valid_pieces) {
- /* We're outside all video content */
- emit_black ();
- _statistics.video.black++;
- } else {
- /* We're inside some video; repeat the frame */
- _last_incoming_video.video->dcp_time = _video_position;
- emit_video (_last_incoming_video.weak_piece, _last_incoming_video.video);
- step_video_position (_last_incoming_video.video);
- _statistics.video.repeat++;
- }
-
- consume = false;
-
- } else if (dv->dcp_time == _video_position) {
- /* We're ok */
- emit_video (earliest_piece, dv);
- step_video_position (dv);
- _statistics.video.good++;
- } else {
- /* Too far behind: skip */
- _statistics.video.skip++;
- }
-
- _just_did_inaccurate_seek = false;
-
- } else if (da && _audio) {
-
- if (da->dcp_time > _audio_position) {
- /* Too far ahead */
- emit_silence (da->dcp_time - _audio_position);
- consume = false;
- _statistics.audio.silence += (da->dcp_time - _audio_position);
- } else if (da->dcp_time == _audio_position) {
- /* We're ok */
- emit_audio (earliest_piece, da);
- _statistics.audio.good += da->data->frames();
- } else {
- /* Too far behind: skip */
- _statistics.audio.skip += da->data->frames();
- }
-
- } else if (dis && _video) {
- _image_subtitle.piece = earliest_piece;
- _image_subtitle.subtitle = dis;
- update_subtitle_from_image ();
- } else if (dts && _video) {
- _text_subtitle.piece = earliest_piece;
- _text_subtitle.subtitle = dts;
- update_subtitle_from_text ();
- }
-
- if (consume) {
- earliest_piece->decoder->consume ();
- }
-
- return false;
-}
-
-void
-Player::emit_video (weak_ptr<Piece> weak_piece, shared_ptr<DecodedVideo> video)
-{
- /* Keep a note of what came in so that we can repeat it if required */
- _last_incoming_video.weak_piece = weak_piece;
- _last_incoming_video.video = video;
-
- shared_ptr<Piece> piece = weak_piece.lock ();
- if (!piece) {
- return;
- }
-
- shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
- assert (content);
-
- FrameRateChange frc (content->video_frame_rate(), _film->video_frame_rate());
-
- dcp::Size image_size = content->scale().size (content, _video_container_size, _film->frame_size ());
- if (_approximate_size) {
- image_size.width &= ~3;
- image_size.height &= ~3;
- }
-
- shared_ptr<PlayerImage> pi (
- new PlayerImage (
- video->image,
- content->crop(),
- image_size,
- _video_container_size,
- _film->scaler()
- )
- );
-
- if (
- _film->with_subtitles () &&
- _out_subtitle.image &&
- video->dcp_time >= _out_subtitle.from && video->dcp_time <= _out_subtitle.to
- ) {
-
- Position<int> const container_offset (
- (_video_container_size.width - image_size.width) / 2,
- (_video_container_size.height - image_size.height) / 2
- );
-
- pi->set_subtitle (_out_subtitle.image, _out_subtitle.position + container_offset);
- }
-
-
-#ifdef DCPOMATIC_DEBUG
- _last_video = piece->content;
-#endif
-
- Video (pi, video->eyes, content->colour_conversion(), video->same, video->dcp_time);
-
- _last_emit_was_black = false;
-}
-
-void
-Player::step_video_position (shared_ptr<DecodedVideo> video)
-{
- /* This is a bit of a hack; don't update _video_position if EYES_RIGHT is on its way */
- if (video->eyes != EYES_LEFT) {
- /* This assumes that the video-frames-to-time conversion is exact
- so that there are no accumulated errors caused by rounding.
- */
- _video_position += DCPTime::from_frames (1, _film->video_frame_rate ());
- }
-}
-
-void
-Player::emit_audio (weak_ptr<Piece> weak_piece, shared_ptr<DecodedAudio> audio)
-{
- shared_ptr<Piece> piece = weak_piece.lock ();
- if (!piece) {
- return;
- }
-
- shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> (piece->content);
- assert (content);
-
- /* Gain */
- if (content->audio_gain() != 0) {
- shared_ptr<AudioBuffers> gain (new AudioBuffers (audio->data));
- gain->apply_gain (content->audio_gain ());
- audio->data = gain;
- }
-
- /* Remap channels */
- shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->data->frames()));
- dcp_mapped->make_silent ();
- AudioMapping map = content->audio_mapping ();
- for (int i = 0; i < map.content_channels(); ++i) {
- for (int j = 0; j < _film->audio_channels(); ++j) {
- if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
- dcp_mapped->accumulate_channel (
- audio->data.get(),
- i,
- static_cast<dcp::Channel> (j),
- map.get (i, static_cast<dcp::Channel> (j))
- );
- }
- }
- }
-
- audio->data = dcp_mapped;
-
- /* Delay */
- audio->dcp_time += DCPTime::from_seconds (content->audio_delay() / 1000.0);
- if (audio->dcp_time < DCPTime (0)) {
- int const frames = - audio->dcp_time.frames (_film->audio_frame_rate());
- if (frames >= audio->data->frames ()) {
- return;
- }
-
- shared_ptr<AudioBuffers> trimmed (new AudioBuffers (audio->data->channels(), audio->data->frames() - frames));
- trimmed->copy_from (audio->data.get(), audio->data->frames() - frames, frames, 0);
-
- audio->data = trimmed;
- audio->dcp_time = DCPTime ();
- }
-
- _audio_merger.push (audio->data, audio->dcp_time);
-}
-
-void
-Player::flush ()
-{
- TimedAudioBuffers tb = _audio_merger.flush ();
- if (_audio && tb.audio) {
- Audio (tb.audio, tb.time);
- _audio_position += DCPTime::from_frames (tb.audio->frames (), _film->audio_frame_rate ());
- }
-
- while (_video && _video_position < _audio_position) {
- emit_black ();
- }
-
- while (_audio && _audio_position < _video_position) {
- emit_silence (_video_position - _audio_position);
- }
-}
-
-/** Seek so that the next pass() will yield (approximately) the requested frame.
- * Pass accurate = true to try harder to get close to the request.
- * @return true on error
- */
-void
-Player::seek (DCPTime t, bool accurate)
-{
- if (!_have_valid_pieces) {
- setup_pieces ();
- }
-
- if (_pieces.empty ()) {
- return;
- }
-
- for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
- /* s is the offset of t from the start position of this content */
- DCPTime s = t - (*i)->content->position ();
- s = max (static_cast<DCPTime> (0), s);
- s = min ((*i)->content->length_after_trim(), s);
-
- /* Convert this to the content time */
- ContentTime ct (s + (*i)->content->trim_start(), (*i)->frc);
-
- /* And seek the decoder */
- (*i)->decoder->seek (ct, accurate);
- }
-
- _video_position = t.round_up (_film->video_frame_rate());
- _audio_position = t.round_up (_film->audio_frame_rate());
-
- _audio_merger.clear (_audio_position);
-
- if (!accurate) {
- /* We just did an inaccurate seek, so it's likely that the next thing seen
- out of pass() will be a fair distance from _{video,audio}_position. Setting
- this flag stops pass() from trying to fix that: we assume that if it
- was an inaccurate seek then the caller does not care too much about
- inserting black/silence to keep the time tidy.
- */
- _just_did_inaccurate_seek = true;
- }
-}
-
void
Player::setup_pieces ()
{
/* FFmpeg */
shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
if (fc) {
- decoder.reset (new FFmpegDecoder (fc, _film->log(), _video, _audio, _film->with_subtitles ()));
+ decoder.reset (new FFmpegDecoder (fc, _film->log()));
frc = FrameRateChange (fc->video_frame_rate(), _film->video_frame_rate());
}
frc = best_overlap_frc;
}
- ContentTime st ((*i)->trim_start(), frc.get ());
- decoder->seek (st, true);
-
_pieces.push_back (shared_ptr<Piece> (new Piece (*i, decoder, frc.get ())));
}
_have_valid_pieces = true;
-
- /* The Piece for the _last_incoming_video will no longer be valid */
- _last_incoming_video.video.reset ();
-
- _video_position = DCPTime ();
- _audio_position = DCPTime ();
}
void
}
if (
- property == ContentProperty::POSITION || property == ContentProperty::LENGTH ||
- property == ContentProperty::TRIM_START || property == ContentProperty::TRIM_END ||
- property == VideoContentProperty::VIDEO_FRAME_TYPE
+ property == ContentProperty::POSITION ||
+ property == ContentProperty::LENGTH ||
+ property == ContentProperty::TRIM_START ||
+ property == ContentProperty::TRIM_END ||
+ property == ContentProperty::PATH ||
+ property == VideoContentProperty::VIDEO_FRAME_TYPE
) {
_have_valid_pieces = false;
} else if (
property == SubtitleContentProperty::SUBTITLE_X_OFFSET ||
property == SubtitleContentProperty::SUBTITLE_Y_OFFSET ||
- property == SubtitleContentProperty::SUBTITLE_SCALE
- ) {
-
- update_subtitle_from_image ();
- update_subtitle_from_text ();
- Changed (frequent);
-
- } else if (
- property == VideoContentProperty::VIDEO_CROP || property == VideoContentProperty::VIDEO_SCALE ||
+ property == SubtitleContentProperty::SUBTITLE_SCALE ||
+ property == VideoContentProperty::VIDEO_CROP ||
+ property == VideoContentProperty::VIDEO_SCALE ||
property == VideoContentProperty::VIDEO_FRAME_RATE
) {
Changed (frequent);
-
- } else if (property == ContentProperty::PATH) {
-
- _have_valid_pieces = false;
- Changed (frequent);
}
}
{
_video_container_size = s;
- shared_ptr<Image> im (new Image (PIX_FMT_RGB24, _video_container_size, true));
- im->make_black ();
-
- _black_frame.reset (
- new PlayerImage (
- im,
- Crop(),
- _video_container_size,
- _video_container_size,
- Scaler::from_id ("bicubic")
- )
- );
-}
-
-void
-Player::emit_black ()
-{
-#ifdef DCPOMATIC_DEBUG
- _last_video.reset ();
-#endif
-
- Video (_black_frame, EYES_BOTH, ColourConversion(), _last_emit_was_black, _video_position);
- _video_position += DCPTime::from_frames (1, _film->video_frame_rate ());
- _last_emit_was_black = true;
-}
-
-void
-Player::emit_silence (DCPTime most)
-{
- if (most == DCPTime ()) {
- return;
- }
-
- DCPTime t = min (most, DCPTime::from_seconds (0.5));
- shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), t.frames (_film->audio_frame_rate())));
- silence->make_silent ();
- Audio (silence, _audio_position);
-
- _audio_position += t;
+ _black_image.reset (new Image (PIX_FMT_RGB24, _video_container_size, true));
+ _black_image->make_black ();
}
void
}
}
-void
-Player::update_subtitle_from_image ()
+/** Convert a list of image subtitles from one piece of content into
+ * PositionImages scaled and positioned for the video container.
+ * Subtitles with no image are skipped.
+ */
+list<PositionImage>
+Player::process_content_image_subtitles (shared_ptr<SubtitleContent> content, list<shared_ptr<ContentImageSubtitle> > subs)
{
- shared_ptr<Piece> piece = _image_subtitle.piece.lock ();
- if (!piece) {
- return;
- }
+ list<PositionImage> all;
+
+ for (list<shared_ptr<ContentImageSubtitle> >::const_iterator i = subs.begin(); i != subs.end(); ++i) {
+ if (!(*i)->image) {
+ continue;
+ }
- if (!_image_subtitle.subtitle->image) {
- _out_subtitle.image.reset ();
- return;
+ dcpomatic::Rect<double> in_rect = (*i)->rectangle;
+ dcp::Size scaled_size;
+
+ in_rect.x += content->subtitle_x_offset ();
+ in_rect.y += content->subtitle_y_offset ();
+
+ /* We will scale the subtitle up to fit _video_container_size, and also by the additional subtitle_scale */
+ scaled_size.width = in_rect.width * _video_container_size.width * content->subtitle_scale ();
+ scaled_size.height = in_rect.height * _video_container_size.height * content->subtitle_scale ();
+
+ /* Then we need a corrective translation, consisting of two parts:
+ *
+ * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
+ * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
+ *
+ * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
+ * (width_before_subtitle_scale * (1 - subtitle_scale) / 2) and
+ * (height_before_subtitle_scale * (1 - subtitle_scale) / 2).
+ *
+ * Combining these two translations gives these expressions.
+ */
+
+ all.push_back (
+ PositionImage (
+ (*i)->image->scale (
+ scaled_size,
+ Scaler::from_id ("bicubic"),
+ (*i)->image->pixel_format (),
+ true
+ ),
+ Position<int> (
+ rint (_video_container_size.width * (in_rect.x + (in_rect.width * (1 - content->subtitle_scale ()) / 2))),
+ rint (_video_container_size.height * (in_rect.y + (in_rect.height * (1 - content->subtitle_scale ()) / 2)))
+ )
+ )
+ );
	}
- shared_ptr<SubtitleContent> sc = dynamic_pointer_cast<SubtitleContent> (piece->content);
- assert (sc);
-
- dcpomatic::Rect<double> in_rect = _image_subtitle.subtitle->rect;
- dcp::Size scaled_size;
-
- in_rect.x += sc->subtitle_x_offset ();
- in_rect.y += sc->subtitle_y_offset ();
-
- /* We will scale the subtitle up to fit _video_container_size, and also by the additional subtitle_scale */
- scaled_size.width = in_rect.width * _video_container_size.width * sc->subtitle_scale ();
- scaled_size.height = in_rect.height * _video_container_size.height * sc->subtitle_scale ();
-
- /* Then we need a corrective translation, consisting of two parts:
- *
- * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
- * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
- *
- * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
- * (width_before_subtitle_scale * (1 - subtitle_scale) / 2) and
- * (height_before_subtitle_scale * (1 - subtitle_scale) / 2).
- *
- * Combining these two translations gives these expressions.
- */
-
- _out_subtitle.position.x = rint (_video_container_size.width * (in_rect.x + (in_rect.width * (1 - sc->subtitle_scale ()) / 2)));
- _out_subtitle.position.y = rint (_video_container_size.height * (in_rect.y + (in_rect.height * (1 - sc->subtitle_scale ()) / 2)));
-
- _out_subtitle.image = _image_subtitle.subtitle->image->scale (
- scaled_size,
- Scaler::from_id ("bicubic"),
- _image_subtitle.subtitle->image->pixel_format (),
- true
- );
-
- _out_subtitle.from = _image_subtitle.subtitle->dcp_time + piece->content->position ();
- _out_subtitle.to = _image_subtitle.subtitle->dcp_time_to + piece->content->position ();
+ return all;
}
-/** Re-emit the last frame that was emitted, using current settings for crop, ratio, scaler and subtitles.
- * @return false if this could not be done.
- */
-bool
-Player::repeat_last_video ()
+/** Render a list of text subtitles into PositionImages sized for the
+ * video container; empty subtitle lists produce no image.
+ */
+list<PositionImage>
+Player::process_content_text_subtitles (list<shared_ptr<ContentTextSubtitle> > sub)
{
- if (!_last_incoming_video.video || !_have_valid_pieces) {
- return false;
+ list<PositionImage> all;
+ for (list<shared_ptr<ContentTextSubtitle> >::const_iterator i = sub.begin(); i != sub.end(); ++i) {
+ if (!(*i)->subs.empty ()) {
+ all.push_back (render_subtitles ((*i)->subs, _video_container_size));
+ }
	}
- emit_video (
- _last_incoming_video.weak_piece,
- _last_incoming_video.video
- );
-
- return true;
+ return all;
}
void
-Player::update_subtitle_from_text ()
+/* Ask for video frames whose dimensions are rounded down to multiples of 4
+ (used for faster, lower-quality preview scaling).
+*/
+Player::set_approximate_size ()
{
- if (_text_subtitle.subtitle->subs.empty ()) {
- _out_subtitle.image.reset ();
- return;
+ _approximate_size = true;
+}
+
+/** Pull-model accessor: return the DCPVideo frame for a given DCP time,
+ * with subtitles attached (image subtitles always; text subtitles only
+ * when _burn_subtitles is set).
+ */
+shared_ptr<DCPVideo>
+Player::get_video (DCPTime time, bool accurate)
+{
+ if (!_have_valid_pieces) {
+ setup_pieces ();
+ }
+
+ list<shared_ptr<Piece> > ov = overlaps<VideoContent> (time);
+ if (ov.empty ()) {
+ /* No video content at this time: return a black frame */
+ return shared_ptr<DCPVideo> (
+ new DCPVideo (
+ _black_image,
+ EYES_BOTH,
+ Crop (),
+ _video_container_size,
+ _video_container_size,
+ Scaler::from_id ("bicubic"),
+ Config::instance()->colour_conversions().front().conversion,
+ time
+ )
+ );
+ }
+
+ /* Create a DCPVideo from the content's video at this time */
+
+ /* NOTE(review): when several video pieces overlap, the last in the list
+ is used — presumably the topmost; confirm piece ordering.
+ */
+ shared_ptr<Piece> piece = ov.back ();
+ shared_ptr<VideoDecoder> decoder = dynamic_pointer_cast<VideoDecoder> (piece->decoder);
+ assert (decoder);
+ shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
+ assert (content);
+
+ shared_ptr<ContentVideo> dec = decoder->get_video (dcp_to_content_video (piece, time), accurate);
+
+ dcp::Size image_size = content->scale().size (content, _video_container_size, _film->frame_size ());
+ if (_approximate_size) {
+ /* Round dimensions down to multiples of 4 for cheaper scaling */
+ image_size.width &= ~3;
+ image_size.height &= ~3;
+ }
+
+ shared_ptr<DCPVideo> dcp_video (
+ new DCPVideo (
+ dec->image,
+ dec->eyes,
+ content->crop (),
+ image_size,
+ _video_container_size,
+ _film->scaler(),
+ content->colour_conversion (),
+ time
+ )
+ );
+
+ /* Add subtitles */
+
+ ov = overlaps<SubtitleContent> (time);
+ list<PositionImage> sub_images;
+
+ for (list<shared_ptr<Piece> >::const_iterator i = ov.begin(); i != ov.end(); ++i) {
+ /* NOTE(review): these casts are not checked; assumes every
+ SubtitleContent piece carries a SubtitleDecoder — confirm.
+ */
+ shared_ptr<SubtitleDecoder> subtitle_decoder = dynamic_pointer_cast<SubtitleDecoder> ((*i)->decoder);
+ shared_ptr<SubtitleContent> subtitle_content = dynamic_pointer_cast<SubtitleContent> ((*i)->content);
+ ContentTime const from = dcp_to_content_subtitle (*i, time);
+ ContentTime const to = from + ContentTime::from_frames (1, content->video_frame_rate ());
+
+ list<shared_ptr<ContentImageSubtitle> > image_subtitles = subtitle_decoder->get_image_subtitles (from, to);
+ if (!image_subtitles.empty ()) {
+ list<PositionImage> im = process_content_image_subtitles (
+ subtitle_content,
+ image_subtitles
+ );
+
+ copy (im.begin(), im.end(), back_inserter (sub_images));
+ }
+
+ if (_burn_subtitles) {
+ list<shared_ptr<ContentTextSubtitle> > text_subtitles = subtitle_decoder->get_text_subtitles (from, to);
+ if (!text_subtitles.empty ()) {
+ list<PositionImage> im = process_content_text_subtitles (text_subtitles);
+ copy (im.begin(), im.end(), back_inserter (sub_images));
+ }
+ }
	}
- render_subtitles (_text_subtitle.subtitle->subs, _video_container_size, _out_subtitle.image, _out_subtitle.position);
+ if (!sub_images.empty ()) {
+ dcp_video->set_subtitle (merge (sub_images));
+ }
+
+ return dcp_video;
}
-void
-Player::set_approximate_size ()
+/** Pull-model accessor: return audio covering [time, time + length) mixed
+ * from all overlapping audio content, gain-adjusted and remapped into the
+ * film's channel layout. Gaps are silent.
+ */
+shared_ptr<AudioBuffers>
+Player::get_audio (DCPTime time, DCPTime length, bool accurate)
{
- _approximate_size = true;
+ if (!_have_valid_pieces) {
+ setup_pieces ();
+ }
+
+ AudioFrame const length_frames = length.frames (_film->audio_frame_rate ());
+
+ shared_ptr<AudioBuffers> audio (new AudioBuffers (_film->audio_channels(), length_frames));
+ audio->make_silent ();
+
+ list<shared_ptr<Piece> > ov = overlaps<AudioContent> (time);
+ if (ov.empty ()) {
+ return audio;
+ }
+
+ for (list<shared_ptr<Piece> >::iterator i = ov.begin(); i != ov.end(); ++i) {
+
+ shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> ((*i)->content);
+ assert (content);
+ shared_ptr<AudioDecoder> decoder = dynamic_pointer_cast<AudioDecoder> ((*i)->decoder);
+ assert (decoder);
+
+ AudioFrame const content_time = dcp_to_content_audio (*i, time);
+
+ /* Audio from this piece's decoder (which might be more than what we asked for) */
+ shared_ptr<ContentAudio> all = decoder->get_audio (content_time, length_frames, accurate);
+
+ /* Gain */
+ if (content->audio_gain() != 0) {
+ shared_ptr<AudioBuffers> gain (new AudioBuffers (all->audio));
+ gain->apply_gain (content->audio_gain ());
+ all->audio = gain;
+ }
+
+ /* Remap channels */
+ shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), all->audio->frames()));
+ dcp_mapped->make_silent ();
+ AudioMapping map = content->audio_mapping ();
+ /* NOTE(review): this int i shadows the piece iterator i above;
+ consider renaming if this block is revisited.
+ */
+ for (int i = 0; i < map.content_channels(); ++i) {
+ for (int j = 0; j < _film->audio_channels(); ++j) {
+ if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
+ dcp_mapped->accumulate_channel (
+ all->audio.get(),
+ i,
+ j,
+ map.get (i, static_cast<dcp::Channel> (j))
+ );
+ }
+ }
+ }
+
+ all->audio = dcp_mapped;
+
+ /* Delay */
+ /* XXX
+ audio->dcp_time += content->audio_delay() * TIME_HZ / 1000;
+ if (audio->dcp_time < 0) {
+ int const frames = - audio->dcp_time * _film->audio_frame_rate() / TIME_HZ;
+ if (frames >= audio->audio->frames ()) {
+ return;
+ }
+
+ shared_ptr<AudioBuffers> trimmed (new AudioBuffers (audio->audio->channels(), audio->audio->frames() - frames));
+ trimmed->copy_from (audio->audio.get(), audio->audio->frames() - frames, frames, 0);
+
+ audio->audio = trimmed;
+ audio->dcp_time = 0;
+ }
+ */
+
+ audio->accumulate_frames (all->audio.get(), all->frame - content_time, 0, min (AudioFrame (all->audio->frames()), length_frames));
+ }
+
+ return audio;
}
-
-PlayerImage::PlayerImage (
- shared_ptr<const Image> in,
- Crop crop,
- dcp::Size inter_size,
- dcp::Size out_size,
- Scaler const * scaler
- )
- : _in (in)
- , _crop (crop)
- , _inter_size (inter_size)
- , _out_size (out_size)
- , _scaler (scaler)
+
+/** Convert a DCP time to a frame index within a piece's content,
+ * clamped to the piece's trimmed extent.
+ */
+VideoFrame
+Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
+ /* s is the offset of t from the start position of this content */
+ DCPTime s = t - piece->content->position ();
+ s = DCPTime (max (int64_t (0), s.get ()));
+ s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));
+ /* Convert this to the content frame */
+ /* NOTE(review): multiplied by frc.factor() to map DCP frames to content
+ frames — confirm the direction of FrameRateChange::factor().
+ */
+ return DCPTime (s + piece->content->trim_start()).frames (_film->video_frame_rate()) * piece->frc.factor ();
}
-void
-PlayerImage::set_subtitle (shared_ptr<const Image> image, Position<int> pos)
+/** Convert a DCP time to an audio frame index within a piece's content,
+ * clamped to the piece's trimmed extent.
+ */
+AudioFrame
+Player::dcp_to_content_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
- _subtitle_image = image;
- _subtitle_position = pos;
+ /* s is the offset of t from the start position of this content */
+ DCPTime s = t - piece->content->position ();
+ s = DCPTime (max (int64_t (0), s.get ()));
+ s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));
+
+ /* Convert this to the content frame */
+ return DCPTime (s + piece->content->trim_start()).frames (_film->audio_frame_rate());
}
-shared_ptr<Image>
-PlayerImage::image (AVPixelFormat format, bool aligned)
+/** Convert a DCP time to a ContentTime within a piece's content,
+ * clamped to the piece's trimmed extent.
+ */
+ContentTime
+Player::dcp_to_content_subtitle (shared_ptr<const Piece> piece, DCPTime t) const
{
- shared_ptr<Image> out = _in->crop_scale_window (_crop, _inter_size, _out_size, _scaler, format, aligned);
-
- Position<int> const container_offset ((_out_size.width - _inter_size.width) / 2, (_out_size.height - _inter_size.width) / 2);
-
- if (_subtitle_image) {
- out->alpha_blend (_subtitle_image, _subtitle_position);
- }
+ /* s is the offset of t from the start position of this content */
+ DCPTime s = t - piece->content->position ();
+ s = DCPTime (max (int64_t (0), s.get ()));
+ s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));
- return out;
+ /* Add trim_start so that, like dcp_to_content_video/audio, we return a
+ position within the content itself rather than within its trimmed
+ part; without it subtitle lookups are early by the trim amount.
+ */
+ return ContentTime (s + piece->content->trim_start(), piece->frc);
}
void
#include "content.h"
#include "film.h"
#include "rect.h"
-#include "audio_merger.h"
#include "audio_content.h"
-#include "decoded.h"
+#include "dcpomatic_time.h"
+#include "content_subtitle.h"
+#include "position_image.h"
class Job;
class Film;
class AudioContent;
class Piece;
class Image;
-
-/** @class PlayerImage
- * @brief A wrapper for an Image which contains some pending operations; these may
- * not be necessary if the receiver of the PlayerImage throws it away.
- */
-class PlayerImage
-{
-public:
- PlayerImage (boost::shared_ptr<const Image>, Crop, dcp::Size, dcp::Size, Scaler const *);
-
- void set_subtitle (boost::shared_ptr<const Image>, Position<int>);
-
- boost::shared_ptr<Image> image (AVPixelFormat, bool);
-
-private:
- boost::shared_ptr<const Image> _in;
- Crop _crop;
- dcp::Size _inter_size;
- dcp::Size _out_size;
- Scaler const * _scaler;
- boost::shared_ptr<const Image> _subtitle_image;
- Position<int> _subtitle_position;
-};
+class DCPVideo;
+class Decoder;
class PlayerStatistics
{
void dump (boost::shared_ptr<Log>) const;
};
-
+
+/** A piece of content in the playlist together with the decoder that
+ * produces its data and its frame-rate-change relative to the film.
+ */
+class Piece
+{
+public:
+ Piece (boost::shared_ptr<Content> c, boost::shared_ptr<Decoder> d, FrameRateChange f)
+ : content (c)
+ , decoder (d)
+ , frc (f)
+ {}
+
+ boost::shared_ptr<Content> content;
+ boost::shared_ptr<Decoder> decoder;
+ FrameRateChange frc;
+};
+
/** @class Player
- * @brief A class which can `play' a Playlist; emitting its audio and video.
+ * @brief A class which can `play' a Playlist.
*/
-
class Player : public boost::enable_shared_from_this<Player>, public boost::noncopyable
{
public:
Player (boost::shared_ptr<const Film>, boost::shared_ptr<const Playlist>);
- void disable_video ();
- void disable_audio ();
-
- bool pass ();
- void seek (DCPTime, bool);
-
- DCPTime video_position () const {
- return _video_position;
- }
+ boost::shared_ptr<DCPVideo> get_video (DCPTime time, bool accurate);
+ boost::shared_ptr<AudioBuffers> get_audio (DCPTime time, DCPTime length, bool accurate);
void set_video_container_size (dcp::Size);
void set_approximate_size ();
-
- bool repeat_last_video ();
+ void set_burn_subtitles (bool burn) {
+ _burn_subtitles = burn;
+ }
PlayerStatistics const & statistics () const;
- /** Emitted when a video frame is ready.
- * First parameter is the video image.
- * Second parameter is the eye(s) that should see this image.
- * Third parameter is the colour conversion that should be used for this image.
- * Fourth parameter is true if the image is the same as the last one that was emitted.
- * Fifth parameter is the time.
- */
- boost::signals2::signal<void (boost::shared_ptr<PlayerImage>, Eyes, ColourConversion, bool, DCPTime)> Video;
-
- /** Emitted when some audio data is ready */
- boost::signals2::signal<void (boost::shared_ptr<const AudioBuffers>, DCPTime)> Audio;
-
/** Emitted when something has changed such that if we went back and emitted
* the last frame again it would look different. This is not emitted after
* a seek.
void setup_pieces ();
void playlist_changed ();
void content_changed (boost::weak_ptr<Content>, int, bool);
- void do_seek (DCPTime, bool);
void flush ();
- void emit_black ();
- void emit_silence (DCPTime);
void film_changed (Film::Property);
- void update_subtitle_from_image ();
+ std::list<PositionImage> process_content_image_subtitles (
+ boost::shared_ptr<SubtitleContent>, std::list<boost::shared_ptr<ContentImageSubtitle> >
+ );
+ std::list<PositionImage> process_content_text_subtitles (std::list<boost::shared_ptr<ContentTextSubtitle> >);
void update_subtitle_from_text ();
- void emit_video (boost::weak_ptr<Piece>, boost::shared_ptr<DecodedVideo>);
- void emit_audio (boost::weak_ptr<Piece>, boost::shared_ptr<DecodedAudio>);
- void step_video_position (boost::shared_ptr<DecodedVideo>);
-
+ VideoFrame dcp_to_content_video (boost::shared_ptr<const Piece> piece, DCPTime t) const;
+ AudioFrame dcp_to_content_audio (boost::shared_ptr<const Piece> piece, DCPTime t) const;
+ ContentTime dcp_to_content_subtitle (boost::shared_ptr<const Piece> piece, DCPTime t) const;
+
+ /** @return pieces whose content is of type C and is presented at DCP time t */
+ template<class C>
+ std::list<boost::shared_ptr<Piece> >
+ overlaps (DCPTime t)
+ {
+ std::list<boost::shared_ptr<Piece> > overlaps;
+ for (typename std::list<boost::shared_ptr<Piece> >::const_iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
+ /* A piece covers t when t lies in [position, end); the previous
+ test (position() >= t && end() < t) could never be true, so no
+ content was ever found.
+ */
+ if (boost::dynamic_pointer_cast<C> ((*i)->content) && (*i)->content->position() <= t && t < (*i)->content->end()) {
+ overlaps.push_back (*i);
+ }
+ }
+
+ return overlaps;
+ }
+
boost::shared_ptr<const Film> _film;
boost::shared_ptr<const Playlist> _playlist;
-
- bool _video;
- bool _audio;
/** Our pieces are ready to go; if this is false the pieces must be (re-)created before they are used */
bool _have_valid_pieces;
std::list<boost::shared_ptr<Piece> > _pieces;
- /** The time after the last video that we emitted */
- DCPTime _video_position;
- /** The time after the last audio that we emitted */
- DCPTime _audio_position;
-
- AudioMerger _audio_merger;
-
dcp::Size _video_container_size;
- boost::shared_ptr<PlayerImage> _black_frame;
-
- struct {
- boost::weak_ptr<Piece> piece;
- boost::shared_ptr<DecodedImageSubtitle> subtitle;
- } _image_subtitle;
-
- struct {
- boost::weak_ptr<Piece> piece;
- boost::shared_ptr<DecodedTextSubtitle> subtitle;
- } _text_subtitle;
-
- struct {
- Position<int> position;
- boost::shared_ptr<Image> image;
- DCPTime from;
- DCPTime to;
- } _out_subtitle;
-
-#ifdef DCPOMATIC_DEBUG
- boost::shared_ptr<Content> _last_video;
-#endif
-
- bool _last_emit_was_black;
-
- struct {
- boost::weak_ptr<Piece> weak_piece;
- boost::shared_ptr<DecodedVideo> video;
- } _last_incoming_video;
+ boost::shared_ptr<Image> _black_image;
- bool _just_did_inaccurate_seek;
bool _approximate_size;
+ bool _burn_subtitles;
PlayerStatistics _statistics;
, height (0)
{}
+ Rect (Position<T> p, T w_, T h_)
+ : x (p.x)
+ , y (p.y)
+ , width (w_)
+ , height (h_)
+ {}
+
Rect (T x_, T y_, T w_, T h_)
: x (x_)
, y (y_)
T width;
T height;
- Position<T> position () const {
+ Position<T> position () const
+ {
return Position<T> (x, y);
}
- Rect<T> intersection (Rect<T> const & other) const {
+ Rect<T> intersection (Rect<T> const & other) const
+ {
T const tx = max (x, other.x);
T const ty = max (y, other.y);
);
}
- bool contains (Position<T> p) const {
+	void extend (Rect<T> const & other)
+	{
+		/* Compute the union's extents from the old origin before overwriting it;
+		   updating x/y first would make (x + width) use the new origin with the
+		   old width and give the wrong right/bottom edge.
+		*/
+		width = std::max (x + width, other.x + other.width) - std::min (x, other.x);
+		height = std::max (y + height, other.y + other.height) - std::min (y, other.y);
+		x = std::min (x, other.x);
+		y = std::min (y, other.y);
+	}
+
+ bool contains (Position<T> p) const
+ {
return (p.x >= x && p.x <= (x + width) && p.y >= y && p.y <= (y + height));
}
};
using std::string;
using std::min;
using std::max;
+using std::pair;
using boost::shared_ptr;
using boost::optional;
return 0;
}
-void
-render_subtitles (list<dcp::SubtitleString> subtitles, dcp::Size target, shared_ptr<Image>& image, Position<int>& position)
+PositionImage
+render_subtitles (list<dcp::SubtitleString> subtitles, dcp::Size target)
{
if (subtitles.empty ()) {
- image.reset ();
- return;
+ return PositionImage ();
}
/* Estimate height that the subtitle image needs to be */
top = top.get() - 32;
bottom = bottom.get() + 32;
- image.reset (new Image (PIX_FMT_RGBA, dcp::Size (target.width, bottom.get() - top.get ()), false));
+ shared_ptr<Image> image (new Image (PIX_FMT_RGBA, dcp::Size (target.width, bottom.get() - top.get ()), false));
image->make_black ();
Cairo::RefPtr<Cairo::ImageSurface> surface = Cairo::ImageSurface::create (
context->stroke ();
}
}
+
+ return PositionImage (image, Position<int> (0, top.get ()));
}
#include <dcp/subtitle_string.h>
#include <dcp/util.h>
-#include "position.h"
+#include "position_image.h"
-class Image;
-
-void
-render_subtitles (std::list<dcp::SubtitleString>, dcp::Size, boost::shared_ptr<Image> &, Position<int> &);
+PositionImage render_subtitles (std::list<dcp::SubtitleString>, dcp::Size);
void
SndfileDecoder::seek (ContentTime t, bool accurate)
{
- Decoder::seek (t, accurate);
AudioDecoder::seek (t, accurate);
_done = t.frames (audio_frame_rate ());
class SndfileDecoder : public AudioDecoder
{
public:
- SndfileDecoder (boost::shared_ptr<const SndfileContent>);
+ SndfileDecoder (boost::shared_ptr<const SndfileContent> c);
~SndfileDecoder ();
void seek (ContentTime, bool);
}
+void
+SubRipDecoder::seek (ContentTime time, bool)
+{
+	/* Advance _next to the first subtitle which starts at or after the seek
+	   time, so that the next pass() emits from the right place.  The loop
+	   must move _next itself; iterating pieces of _subtitles[0] does nothing.
+	*/
+	_next = 0;
+	while (_next < _subtitles.size() && _subtitles[_next].from < time) {
+		++_next;
+	}
+}
+
bool
SubRipDecoder::pass ()
{
public:
SubRipDecoder (boost::shared_ptr<const SubRipContent>);
-protected:
+protected:
+ void seek (ContentTime time, bool accurate);
bool pass ();
private:
}
-
/** Called by subclasses when an image subtitle is ready.
* Image may be 0 to say that there is no current subtitle.
*/
void
-SubtitleDecoder::image_subtitle (shared_ptr<Image> image, dcpomatic::Rect<double> rect, ContentTime from, ContentTime to)
+SubtitleDecoder::image_subtitle (ContentTime from, ContentTime to, shared_ptr<Image> image, dcpomatic::Rect<double> rect)
{
- _pending.push_back (shared_ptr<DecodedImageSubtitle> (new DecodedImageSubtitle (from, to, image, rect)));
+ _decoded_image_subtitles.push_back (shared_ptr<ContentImageSubtitle> (new ContentImageSubtitle (from, to, image, rect)));
}
void
SubtitleDecoder::text_subtitle (list<dcp::SubtitleString> s)
{
- _pending.push_back (shared_ptr<DecodedTextSubtitle> (new DecodedTextSubtitle (s)));
+ _decoded_text_subtitles.push_back (shared_ptr<ContentTextSubtitle> (new ContentTextSubtitle (s)));
+}
+
+template <class T>
+list<shared_ptr<T> >
+get (list<shared_ptr<T> > const & subs, ContentTime from, ContentTime to)
+{
+ /* XXX: inefficient */
+ list<shared_ptr<T> > out;
+ for (typename list<shared_ptr<T> >::const_iterator i = subs.begin(); i != subs.end(); ++i) {
+ if ((*i)->from() <= to && (*i)->to() >= from) {
+ out.push_back (*i);
+ }
+ }
+
+ return out;
+}
+
+list<shared_ptr<ContentTextSubtitle> >
+SubtitleDecoder::get_text_subtitles (ContentTime from, ContentTime to)
+{
+ return get<ContentTextSubtitle> (_decoded_text_subtitles, from, to);
+}
+
+list<shared_ptr<ContentImageSubtitle> >
+SubtitleDecoder::get_image_subtitles (ContentTime from, ContentTime to)
+{
+ return get<ContentImageSubtitle> (_decoded_image_subtitles, from, to);
}
#ifndef DCPOMATIC_SUBTITLE_DECODER_H
#define DCPOMATIC_SUBTITLE_DECODER_H
-#include <boost/signals2.hpp>
#include <dcp/subtitle_string.h>
#include "decoder.h"
#include "rect.h"
#include "types.h"
-#include "decoded.h"
+#include "content_subtitle.h"
class Film;
class DCPTimedSubtitle;
public:
SubtitleDecoder ();
+ std::list<boost::shared_ptr<ContentImageSubtitle> > get_image_subtitles (ContentTime from, ContentTime to);
+ std::list<boost::shared_ptr<ContentTextSubtitle> > get_text_subtitles (ContentTime from, ContentTime to);
+
protected:
- void image_subtitle (boost::shared_ptr<Image>, dcpomatic::Rect<double>, ContentTime, ContentTime);
+ void image_subtitle (ContentTime from, ContentTime to, boost::shared_ptr<Image>, dcpomatic::Rect<double>);
void text_subtitle (std::list<dcp::SubtitleString>);
+
+ std::list<boost::shared_ptr<ContentImageSubtitle> > _decoded_image_subtitles;
+ std::list<boost::shared_ptr<ContentTextSubtitle> > _decoded_text_subtitles;
};
#endif
using boost::weak_ptr;
using boost::dynamic_pointer_cast;
-static void
-video_proxy (weak_ptr<Encoder> encoder, shared_ptr<PlayerImage> image, Eyes eyes, ColourConversion conversion, bool same)
-{
- shared_ptr<Encoder> e = encoder.lock ();
- if (e) {
- e->process_video (image, eyes, conversion, same);
- }
-}
-
-static void
-audio_proxy (weak_ptr<Encoder> encoder, shared_ptr<const AudioBuffers> audio)
-{
- shared_ptr<Encoder> e = encoder.lock ();
- if (e) {
- e->process_audio (audio);
- }
-}
-
/** Construct a transcoder using a Decoder that we create and a supplied Encoder.
* @param f Film that we are transcoding.
* @param e Encoder to use.
, _encoder (new Encoder (f, j))
, _finishing (false)
{
- _player->Video.connect (bind (video_proxy, _encoder, _1, _2, _3, _4));
- _player->Audio.connect (bind (audio_proxy, _encoder, _1));
+
}
void
Transcoder::go ()
{
_encoder->process_begin ();
- while (!_player->pass ()) {}
+
+ DCPTime const frame = DCPTime::from_frames (1, _film->video_frame_rate ());
+ for (DCPTime t; t < _film->length(); t += frame) {
+ _encoder->process_video (_player->get_video (t, true));
+ _encoder->process_audio (_player->get_audio (t, frame, true));
+ }
_finishing = true;
_encoder->process_end ();
#include <boost/shared_ptr.hpp>
#include <dcp/util.h>
#include "dcpomatic_time.h"
+#include "position.h"
class Content;
class VideoContent;
typedef std::vector<boost::shared_ptr<SubtitleContent> > SubtitleContentList;
typedef std::vector<boost::shared_ptr<FFmpegContent> > FFmpegContentList;
+typedef int64_t VideoFrame;
+typedef int64_t AudioFrame;
+
+/* XXX -> DCPAudio */
struct TimedAudioBuffers
{
TimedAudioBuffers ()
#include <glib.h>
#include <openjpeg.h>
#include <openssl/md5.h>
+#include <pangomm/init.h>
#include <magick/MagickCore.h>
#include <magick/version.h>
-#include <pangomm/init.h>
#include <dcp/version.h>
#include <dcp/util.h>
#include <dcp/signer_chain.h>
#include "job.h"
#include "cross.h"
#include "video_content.h"
+#include "rect.h"
#ifdef DCPOMATIC_WINDOWS
#include "stack.hpp"
#endif
return s.str ();
}
-/** Return a user-readable string summarising the versions of our dependencies */
-string
-dependency_version_summary ()
-{
- stringstream s;
- s << N_("libopenjpeg ") << opj_version () << N_(", ")
- << N_("libavcodec ") << ffmpeg_version_to_string (avcodec_version()) << N_(", ")
- << N_("libavfilter ") << ffmpeg_version_to_string (avfilter_version()) << N_(", ")
- << N_("libavformat ") << ffmpeg_version_to_string (avformat_version()) << N_(", ")
- << N_("libavutil ") << ffmpeg_version_to_string (avutil_version()) << N_(", ")
- << N_("libswscale ") << ffmpeg_version_to_string (swscale_version()) << N_(", ")
- << MagickVersion << N_(", ")
- << N_("libssh ") << ssh_version (0) << N_(", ")
- << N_("libdcp ") << dcp::version << N_(" git ") << dcp::git_commit;
-
- return s.str ();
-}
-
double
seconds (struct timeval t)
{
return a / b;
}
}
+
+/** Return a user-readable string summarising the versions of our dependencies */
+string
+dependency_version_summary ()
+{
+ stringstream s;
+ s << N_("libopenjpeg ") << opj_version () << N_(", ")
+ << N_("libavcodec ") << ffmpeg_version_to_string (avcodec_version()) << N_(", ")
+ << N_("libavfilter ") << ffmpeg_version_to_string (avfilter_version()) << N_(", ")
+ << N_("libavformat ") << ffmpeg_version_to_string (avformat_version()) << N_(", ")
+ << N_("libavutil ") << ffmpeg_version_to_string (avutil_version()) << N_(", ")
+ << N_("libswscale ") << ffmpeg_version_to_string (swscale_version()) << N_(", ")
+ << MagickVersion << N_(", ")
+ << N_("libssh ") << ssh_version (0) << N_(", ")
+ << N_("libdcp ") << dcp::version << N_(" git ") << dcp::git_commit;
+
+ return s.str ();
+}
/*
- Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
#include "video_decoder.h"
#include "image.h"
+#include "content_video.h"
#include "i18n.h"
using std::cout;
+using std::list;
using boost::shared_ptr;
using boost::optional;
}
+shared_ptr<ContentVideo>
+VideoDecoder::decoded_video (VideoFrame frame)
+{
+ for (list<shared_ptr<ContentVideo> >::const_iterator i = _decoded_video.begin(); i != _decoded_video.end(); ++i) {
+ if ((*i)->frame == frame) {
+ return *i;
+ }
+ }
+
+ return shared_ptr<ContentVideo> ();
+}
+
+shared_ptr<ContentVideo>
+VideoDecoder::get_video (VideoFrame frame, bool accurate)
+{
+ if (_decoded_video.empty() || (frame < _decoded_video.front()->frame || frame > (_decoded_video.back()->frame + 1))) {
+ /* Either we have no decoded data, or what we do have is a long way from what we want: seek */
+ seek (ContentTime::from_frames (frame, _video_content->video_frame_rate()), accurate);
+ }
+
+ shared_ptr<ContentVideo> dec;
+
+ /* Now enough pass() calls will either:
+ * (a) give us what we want, or
+ * (b) hit the end of the decoder.
+ */
+ if (accurate) {
+ /* We are being accurate, so we want the right frame */
+ while (!decoded_video (frame) && !pass ()) {}
+ dec = decoded_video (frame);
+ } else {
+ /* Any frame will do: use the first one that comes out of pass() */
+ while (_decoded_video.empty() && !pass ()) {}
+ if (!_decoded_video.empty ()) {
+ dec = _decoded_video.front ();
+ }
+ }
+
+ /* Clean up decoded_video */
+ while (!_decoded_video.empty() && _decoded_video.front()->frame < (frame - 1)) {
+ _decoded_video.pop_front ();
+ }
+
+ return dec;
+}
+
+
/** Called by subclasses when they have a video frame ready */
void
-VideoDecoder::video (shared_ptr<const Image> image, bool same, ContentTime time)
+VideoDecoder::video (shared_ptr<const Image> image, VideoFrame frame)
{
switch (_video_content->video_frame_type ()) {
case VIDEO_FRAME_TYPE_2D:
- _pending.push_back (shared_ptr<DecodedVideo> (new DecodedVideo (time, image, EYES_BOTH, same)));
+ _decoded_video.push_back (shared_ptr<ContentVideo> (new ContentVideo (image, EYES_BOTH, frame)));
break;
case VIDEO_FRAME_TYPE_3D_ALTERNATE:
- Video (image, (frame % 2) ? EYES_RIGHT : EYES_LEFT, same);
+ _decoded_video.push_back (shared_ptr<ContentVideo> (new ContentVideo (image, (frame % 2) ? EYES_RIGHT : EYES_LEFT, frame)));
break;
case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
{
int const half = image->size().width / 2;
- _pending.push_back (shared_ptr<DecodedVideo> (new DecodedVideo (time, image->crop (Crop (0, half, 0, 0), true), EYES_LEFT, same)));
- _pending.push_back (shared_ptr<DecodedVideo> (new DecodedVideo (time, image->crop (Crop (half, 0, 0, 0), true), EYES_RIGHT, same)));
+ _decoded_video.push_back (shared_ptr<ContentVideo> (new ContentVideo (image->crop (Crop (0, half, 0, 0), true), EYES_LEFT, frame)));
+ _decoded_video.push_back (shared_ptr<ContentVideo> (new ContentVideo (image->crop (Crop (half, 0, 0, 0), true), EYES_RIGHT, frame)));
break;
}
case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
{
int const half = image->size().height / 2;
- _pending.push_back (shared_ptr<DecodedVideo> (new DecodedVideo (time, image->crop (Crop (0, 0, 0, half), true), EYES_LEFT, same)));
- _pending.push_back (shared_ptr<DecodedVideo> (new DecodedVideo (time, image->crop (Crop (0, 0, half, 0), true), EYES_RIGHT, same)));
+ _decoded_video.push_back (shared_ptr<ContentVideo> (new ContentVideo (image->crop (Crop (0, 0, 0, half), true), EYES_LEFT, frame)));
+ _decoded_video.push_back (shared_ptr<ContentVideo> (new ContentVideo (image->crop (Crop (0, 0, half, 0), true), EYES_RIGHT, frame)));
break;
}
default:
assert (false);
}
}
+
+void
+VideoDecoder::seek (ContentTime, bool)
+{
+ _decoded_video.clear ();
+}
+
/*
- Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
#include "decoder.h"
#include "video_content.h"
#include "util.h"
-#include "decoded.h"
class VideoContent;
class Image;
+class ContentVideo;
class VideoDecoder : public virtual Decoder
{
public:
- VideoDecoder (boost::shared_ptr<const VideoContent>);
+ VideoDecoder (boost::shared_ptr<const VideoContent> c);
+
+ boost::shared_ptr<ContentVideo> get_video (VideoFrame frame, bool accurate);
boost::shared_ptr<const VideoContent> video_content () const {
return _video_content;
protected:
- void video (boost::shared_ptr<const Image>, bool, ContentTime);
+ void seek (ContentTime time, bool accurate);
+ void video (boost::shared_ptr<const Image>, VideoFrame frame);
+ boost::shared_ptr<ContentVideo> decoded_video (VideoFrame frame);
+
boost::shared_ptr<const VideoContent> _video_content;
+ std::list<boost::shared_ptr<ContentVideo> > _decoded_video;
};
#endif
/*
- Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
, _last_written_eyes (EYES_RIGHT)
, _full_written (0)
, _fake_written (0)
- , _repeat_written (0)
, _pushed_to_disk (0)
{
/* Remove any old DCP */
_last_written[qi.eyes].reset ();
++_fake_written;
break;
- case QueueItem::REPEAT:
- {
- _film->log()->log (String::compose (N_("Writer REPEAT-writes %1 to MXF"), qi.frame));
- dcp::FrameInfo fin = _picture_mxf_writer->write (
- _last_written[qi.eyes]->data(),
- _last_written[qi.eyes]->size()
- );
-
- _last_written[qi.eyes]->write_info (_film, qi.frame, qi.eyes, fin);
- ++_repeat_written;
- break;
- }
}
lock.lock ();
total *= 2;
}
if (total) {
- job->set_progress (float (_full_written + _fake_written + _repeat_written) / total);
+ job->set_progress (float (_full_written + _fake_written) / total);
}
}
dcp.write_xml (_film->interop () ? dcp::INTEROP : dcp::SMPTE, meta, _film->is_signed() ? make_signer () : shared_ptr<const dcp::Signer> ());
_film->log()->log (
- String::compose (N_("Wrote %1 FULL, %2 FAKE, %3 REPEAT; %4 pushed to disk"), _full_written, _fake_written, _repeat_written, _pushed_to_disk)
+ String::compose (N_("Wrote %1 FULL, %2 FAKE, %3 pushed to disk"), _full_written, _fake_written, _pushed_to_disk)
);
}
-/** Tell the writer that frame `f' should be a repeat of the frame before it */
-void
-Writer::repeat (int f, Eyes e)
-{
- boost::mutex::scoped_lock lock (_mutex);
-
- while (_queued_full_in_memory > _maximum_frames_in_memory) {
- _full_condition.wait (lock);
- }
-
- QueueItem qi;
- qi.type = QueueItem::REPEAT;
- qi.frame = f;
- if (_film->three_d() && e == EYES_BOTH) {
- qi.eyes = EYES_LEFT;
- _queue.push_back (qi);
- qi.eyes = EYES_RIGHT;
- _queue.push_back (qi);
- } else {
- qi.eyes = e;
- _queue.push_back (qi);
- }
-
- _empty_condition.notify_all ();
-}
-
bool
Writer::check_existing_picture_mxf_frame (FILE* mxf, int f, Eyes eyes)
{
int _full_written;
/** number of FAKE written frames */
int _fake_written;
- /** number of REPEAT written frames */
- int _repeat_written;
/** number of frames pushed to disk and then recovered
due to the limit of frames to be held in memory.
*/
audio_content.cc
audio_decoder.cc
audio_mapping.cc
- audio_merger.cc
cinema.cc
colour_conversion.cc
config.cc
content.cc
content_factory.cc
+ content_subtitle.cc
cross.cc
dci_metadata.cc
dcp_content_type.cc
+ dcp_video.cc
dcp_video_frame.cc
dcpomatic_time.cc
- decoder.cc
dolby_cp750.cc
encoder.cc
examine_content_job.cc
#include "lib/log.h"
#include "lib/video_decoder.h"
#include "lib/player.h"
+#include "lib/dcp_video.h"
using std::cout;
using std::cerr;
static shared_ptr<Film> film;
static ServerDescription* server;
static shared_ptr<FileLog> log_ (new FileLog ("servomatictest.log"));
-static int frame = 0;
+static int frame_count = 0;
void
-process_video (shared_ptr<PlayerImage> image, Eyes eyes, ColourConversion conversion, DCPTime)
+process_video (shared_ptr<DCPVideo> frame)
{
shared_ptr<DCPVideoFrame> local (
- new DCPVideoFrame (image->image (PIX_FMT_RGB24, false), frame, eyes, conversion, film->video_frame_rate(), 250000000, RESOLUTION_2K, log_)
+ new DCPVideoFrame (
+ frame->image (PIX_FMT_RGB24, false), frame_count, frame->eyes(), frame->conversion(), film->video_frame_rate(), 250000000, RESOLUTION_2K, log_
+ )
);
shared_ptr<DCPVideoFrame> remote (
- new DCPVideoFrame (image->image (PIX_FMT_RGB24, false), frame, eyes, conversion, film->video_frame_rate(), 250000000, RESOLUTION_2K, log_)
+ new DCPVideoFrame (
+ frame->image (PIX_FMT_RGB24, false), frame_count, frame->eyes(), frame->conversion(), film->video_frame_rate(), 250000000, RESOLUTION_2K, log_
+ )
);
- cout << "Frame " << frame << ": ";
+ cout << "Frame " << frame_count << ": ";
cout.flush ();
- ++frame;
+ ++frame_count;
shared_ptr<EncodedData> local_encoded = local->encode_locally ();
shared_ptr<EncodedData> remote_encoded;
film->read_metadata ();
shared_ptr<Player> player = film->make_player ();
- player->disable_audio ();
- player->Video.connect (boost::bind (process_video, _1, _2, _3, _5));
- bool done = false;
- while (!done) {
- done = player->pass ();
+ DCPTime const frame = DCPTime::from_frames (1, film->video_frame_rate ());
+ for (DCPTime t; t < film->length(); t += frame) {
+ process_video (player->get_video (t, true));
}
} catch (std::exception& e) {
cerr << "Error: " << e.what() << "\n";
#include "lib/video_content.h"
#include "lib/video_decoder.h"
#include "lib/timer.h"
+#include "lib/dcp_video.h"
#include "film_viewer.h"
#include "wx_util.h"
, _frame_number (new wxStaticText (this, wxID_ANY, wxT("")))
, _timecode (new wxStaticText (this, wxID_ANY, wxT("")))
, _play_button (new wxToggleButton (this, wxID_ANY, _("Play")))
- , _got_frame (false)
{
#ifndef __WXOSX__
_panel->SetDoubleBuffered (true);
return;
}
- _player->disable_audio ();
_player->set_approximate_size ();
- _player->Video.connect (boost::bind (&FilmViewer::process_video, this, _1, _2, _5));
_player->Changed.connect (boost::bind (&FilmViewer::player_changed, this, _1));
calculate_sizes ();
- fetch_next_frame ();
+ get (_position, true);
}
void
-FilmViewer::fetch_current_frame_again ()
+FilmViewer::get (DCPTime p, bool accurate)
{
- if (!_player) {
- return;
+ shared_ptr<DCPVideo> dcp_video = _player->get_video (p, accurate);
+ if (dcp_video) {
+ _frame = dcp_video->image (PIX_FMT_BGRA, true);
+ _frame = _frame->scale (_frame->size(), Scaler::from_id ("fastbilinear"), PIX_FMT_RGB24, false);
+ } else {
+ _frame.reset ();
}
- /* We could do this with a seek and a fetch_next_frame, but this is
- a shortcut to make it quicker.
- */
-
- _got_frame = false;
- if (!_player->repeat_last_video ()) {
- fetch_next_frame ();
- }
-
+ set_position_text (p);
_panel->Refresh ();
_panel->Update ();
}
return;
}
- fetch_next_frame ();
+ get (_position + DCPTime::from_frames (1, _film->video_frame_rate ()), true);
DCPTime const len = _film->length ();
if (len.get ()) {
- int const new_slider_position = 4096 * _player->video_position().get() / len.get();
+ int const new_slider_position = 4096 * _position.get() / len.get();
if (new_slider_position != _slider->GetValue()) {
_slider->SetValue (new_slider_position);
}
void
FilmViewer::slider_moved ()
{
- if (_film && _player) {
- try {
- DCPTime t (_slider->GetValue() * _film->length().get() / 4096);
- /* Ensure that we hit the end of the film at the end of the slider */
- if (t >= _film->length ()) {
- t = _film->length() - DCPTime::from_frames (1, _film->video_frame_rate ());
- }
- _player->seek (t, false);
- fetch_next_frame ();
- } catch (OpenFileError& e) {
- /* There was a problem opening a content file; we'll let this slide as it
- probably means a missing content file, which we're already taking care of.
- */
- }
+ if (!_film || !_player) {
+ return;
+ }
+
+ DCPTime t (_slider->GetValue() * _film->length().get() / 4096);
+ /* Ensure that we hit the end of the film at the end of the slider */
+ if (t >= _film->length ()) {
+ t = _film->length() - DCPTime::from_frames (1, _film->video_frame_rate ());
}
+ get (t, false);
}
void
_panel_size.width = ev.GetSize().GetWidth();
_panel_size.height = ev.GetSize().GetHeight();
calculate_sizes ();
- fetch_current_frame_again ();
+ get (_position, true);
}
void
}
}
-void
-FilmViewer::process_video (shared_ptr<PlayerImage> image, Eyes eyes, DCPTime t)
-{
- if (eyes == EYES_RIGHT) {
- return;
- }
-
- /* Going via BGRA here makes the scaler faster then using RGB24 directly (about
- twice on x86 Linux).
- */
- shared_ptr<Image> im = image->image (PIX_FMT_BGRA, true);
- _frame = im->scale (im->size(), Scaler::from_id ("fastbilinear"), PIX_FMT_RGB24, false);
- _got_frame = true;
-
- set_position_text (t);
-}
-
void
FilmViewer::set_position_text (DCPTime t)
{
_timecode->SetLabel (wxString::Format (wxT("%02d:%02d:%02d.%02d"), h, m, s, f));
}
-/** Ask the player to emit its next frame, then update our display */
-void
-FilmViewer::fetch_next_frame ()
-{
- /* Clear our frame in case we don't get a new one */
- _frame.reset ();
-
- if (!_player) {
- return;
- }
-
- _got_frame = false;
-
- try {
- while (!_got_frame && !_player->pass ()) {}
- } catch (DecodeError& e) {
- _play_button->SetValue (false);
- check_play_state ();
- error_dialog (this, wxString::Format (_("Could not decode video for view (%s)"), std_to_wx(e.what()).data()));
- } catch (OpenFileError& e) {
- /* There was a problem opening a content file; we'll let this slide as it
- probably means a missing content file, which we're already taking care of.
- */
- }
-
- _panel->Refresh ();
- _panel->Update ();
-}
-
void
FilmViewer::active_jobs_changed (bool a)
{
return;
}
- /* Player::video_position is the time after the last frame that we received.
- We want to see the one before it, so we need to go back 2.
- */
-
- DCPTime p = _player->video_position() - DCPTime::from_frames (2, _film->video_frame_rate ());
+ DCPTime p = _position - DCPTime::from_frames (1, _film->video_frame_rate ());
if (p < DCPTime ()) {
p = DCPTime ();
}
-
- try {
- _player->seek (p, true);
- fetch_next_frame ();
- } catch (OpenFileError& e) {
- /* There was a problem opening a content file; we'll let this slide as it
- probably means a missing content file, which we're already taking care of.
- */
- }
+
+ get (p, true);
}
void
return;
}
- fetch_next_frame ();
+ get (_position + DCPTime::from_frames (1, _film->video_frame_rate ()), true);
}
void
}
calculate_sizes ();
- fetch_current_frame_again ();
+ get (_position, true);
}
/** @class FilmViewer
* @brief A wx widget to view a preview of a Film.
- *
- * The film takes the following path through the viewer:
- *
- * 1. fetch_next_frame() asks our _player to decode some data. If it does, process_video()
- * will be called.
- *
- * 2. process_video() takes the image from the player (_frame).
- *
- * 3. fetch_next_frame() calls _panel->Refresh() and _panel->Update() which results in
- * paint_panel() being called; this creates frame_bitmap from _frame and blits it to the display.
- *
- * fetch_current_frame_again() asks the player to re-emit its current frame on the next pass(), and then
- * starts from step #1.
*/
class FilmViewer : public wxPanel
{
void slider_moved ();
void play_clicked ();
void timer ();
- void process_video (boost::shared_ptr<PlayerImage>, Eyes, DCPTime);
void calculate_sizes ();
void check_play_state ();
- void fetch_current_frame_again ();
- void fetch_next_frame ();
void active_jobs_changed (bool);
void back_clicked ();
void forward_clicked ();
void player_changed (bool);
void set_position_text (DCPTime);
+ void get (DCPTime, bool);
boost::shared_ptr<Film> _film;
boost::shared_ptr<Player> _player;
wxTimer _timer;
boost::shared_ptr<const Image> _frame;
- bool _got_frame;
+ DCPTime _position;
/** Size of our output (including padding if we have any) */
dcp::Size _out_size;
*/
#include "lib/subrip_decoder.h"
-#include "lib/decoded.h"
+#include "lib/content_subtitle.h"
#include "subtitle_view.h"
using std::list;
}
shared_ptr<SubRipDecoder> decoder (new SubRipDecoder (content));
+ list<shared_ptr<ContentTextSubtitle> > subs = decoder->get_text_subtitles (ContentTime(), ContentTime::max ());
int n = 0;
- while (1) {
- shared_ptr<Decoded> dec = decoder->peek ();
- if (!dec) {
- break;
- }
-
- shared_ptr<DecodedTextSubtitle> sub = dynamic_pointer_cast<DecodedTextSubtitle> (dec);
- assert (sub);
-
- for (list<dcp::SubtitleString>::const_iterator i = sub->subs.begin(); i != sub->subs.end(); ++i) {
+ for (list<shared_ptr<ContentTextSubtitle> >::const_iterator i = subs.begin(); i != subs.end(); ++i) {
+ for (list<dcp::SubtitleString>::const_iterator j = (*i)->subs.begin(); j != (*i)->subs.end(); ++j) {
wxListItem list_item;
list_item.SetId (n);
_list->InsertItem (list_item);
- _list->SetItem (n, 2, i->text ());
+ _list->SetItem (n, 2, j->text ());
++n;
}
-
- decoder->consume ();
}
SetSizerAndFit (sizer);
+++ /dev/null
-/*
- Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
-
-#include <boost/test/unit_test.hpp>
-#include <boost/bind.hpp>
-#include <boost/function.hpp>
-#include <boost/signals2.hpp>
-#include "lib/audio_merger.h"
-#include "lib/audio_buffers.h"
-
-using boost::shared_ptr;
-using boost::bind;
-
-static shared_ptr<const AudioBuffers> last_audio;
-
-BOOST_AUTO_TEST_CASE (audio_merger_test1)
-{
- int const frame_rate = 48000;
- AudioMerger merger (1, frame_rate);
-
- /* Push 64 samples, 0 -> 63 at time 0 */
- shared_ptr<AudioBuffers> buffers (new AudioBuffers (1, 64));
- for (int i = 0; i < 64; ++i) {
- buffers->data()[0][i] = i;
- }
- merger.push (buffers, DCPTime ());
-
- /* Push 64 samples, 0 -> 63 at time 22 */
- merger.push (buffers, DCPTime::from_frames (22, frame_rate));
-
- TimedAudioBuffers tb = merger.pull (DCPTime::from_frames (22, frame_rate));
- BOOST_CHECK (tb.audio != shared_ptr<const AudioBuffers> ());
- BOOST_CHECK_EQUAL (tb.audio->frames(), 22);
- BOOST_CHECK_EQUAL (tb.time, DCPTime ());
-
- /* And they should be a staircase */
- for (int i = 0; i < 22; ++i) {
- BOOST_CHECK_EQUAL (tb.audio->data()[0][i], i);
- }
-
- tb = merger.flush ();
-
- /* That flush should give us 64 samples at 22 */
- BOOST_CHECK_EQUAL (tb.audio->frames(), 64);
- BOOST_CHECK_EQUAL (tb.time, DCPTime::from_frames (22, frame_rate));
-
- /* Check the sample values */
- for (int i = 0; i < 64; ++i) {
- int correct = i;
- if (i < (64 - 22)) {
- correct += i + 22;
- }
- BOOST_CHECK_EQUAL (tb.audio->data()[0][i], correct);
- }
-}
-
-BOOST_AUTO_TEST_CASE (audio_merger_test2)
-{
- int const frame_rate = 48000;
- AudioMerger merger (1, frame_rate);
-
- /* Push 64 samples, 0 -> 63 at time 9 */
- shared_ptr<AudioBuffers> buffers (new AudioBuffers (1, 64));
- for (int i = 0; i < 64; ++i) {
- buffers->data()[0][i] = i;
- }
- merger.push (buffers, DCPTime::from_frames (9, frame_rate));
-
- TimedAudioBuffers tb = merger.pull (DCPTime::from_frames (9, frame_rate));
- BOOST_CHECK_EQUAL (tb.audio->frames(), 9);
- BOOST_CHECK_EQUAL (tb.time, DCPTime ());
-
- for (int i = 0; i < 9; ++i) {
- BOOST_CHECK_EQUAL (tb.audio->data()[0][i], 0);
- }
-
- tb = merger.flush ();
-
- /* That flush should give us 64 samples at 9 */
- BOOST_CHECK_EQUAL (tb.audio->frames(), 64);
- BOOST_CHECK_EQUAL (tb.time, DCPTime::from_frames (9, frame_rate));
-
- /* Check the sample values */
- for (int i = 0; i < 64; ++i) {
- BOOST_CHECK_EQUAL (tb.audio->data()[0][i], i);
- }
-}
/* Sound == video so no offset required */
content->_first_video = ContentTime ();
content->_audio_stream->first_audio = ContentTime ();
- FFmpegDecoder decoder (content, film->log(), true, true, true);
+ FFmpegDecoder decoder (content, film->log());
BOOST_CHECK_EQUAL (decoder._pts_offset, ContentTime ());
}
/* Common offset should be removed */
content->_first_video = ContentTime::from_seconds (600);
content->_audio_stream->first_audio = ContentTime::from_seconds (600);
- FFmpegDecoder decoder (content, film->log(), true, true, true);
+ FFmpegDecoder decoder (content, film->log());
BOOST_CHECK_EQUAL (decoder._pts_offset, ContentTime::from_seconds (-600));
}
/* Video is on a frame boundary */
content->_first_video = ContentTime::from_frames (1, 24);
content->_audio_stream->first_audio = ContentTime ();
- FFmpegDecoder decoder (content, film->log(),true, true, true);
+ FFmpegDecoder decoder (content, film->log());
BOOST_CHECK_EQUAL (decoder._pts_offset, ContentTime ());
}
double const frame = 1.0 / 24.0;
content->_first_video = ContentTime::from_seconds (frame + 0.0215);
content->_audio_stream->first_audio = ContentTime ();
- FFmpegDecoder decoder (content, film->log(), true, true, true);
+ FFmpegDecoder decoder (content, film->log());
BOOST_CHECK_CLOSE (decoder._pts_offset.seconds(), (frame - 0.0215), 0.00001);
}
double const frame = 1.0 / 24.0;
content->_first_video = ContentTime::from_seconds (frame + 0.0215 + 4.1);
content->_audio_stream->first_audio = ContentTime::from_seconds (4.1);
- FFmpegDecoder decoder (content, film->log(), true, true, true);
+ FFmpegDecoder decoder (content, film->log());
BOOST_CHECK_EQUAL (decoder._pts_offset.seconds(), (frame - 0.0215) - 4.1);
}
}
+++ /dev/null
-/*
- Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
-
-/** @file test/ffmpeg_seek_test.cc
- * @brief Test seek using Player with an FFmpegDecoder; note that the player
- * can hide problems with FFmpegDecoder seeking as it will skip frames / insert
- * black as it sees fit.
- */
-
-#include <boost/test/unit_test.hpp>
-#include "lib/player.h"
-#include "lib/ffmpeg_decoder.h"
-#include "lib/film.h"
-#include "lib/ratio.h"
-#include "test.h"
-
-using std::cout;
-using std::string;
-using std::stringstream;
-using boost::shared_ptr;
-using boost::optional;
-
-#define FFMPEG_SEEK_TEST_DEBUG 1
-
-optional<DCPTime> first_video;
-optional<DCPTime> first_audio;
-shared_ptr<Film> film;
-
-static void
-process_video (shared_ptr<PlayerImage>, Eyes, ColourConversion, bool, DCPTime t)
-{
- if (!first_video) {
- first_video = t;
- }
-}
-
-static void
-process_audio (shared_ptr<const AudioBuffers>, DCPTime t)
-{
- if (!first_audio) {
- first_audio = t;
- }
-}
-
-static string
-print_time (DCPTime t, float fps)
-{
- stringstream s;
- s << t.seconds() << "s " << t.frames (fps) << "f";
- return s.str ();
-}
-
-static void
-check (shared_ptr<Player> p, DCPTime t)
-{
- first_video.reset ();
- first_audio.reset ();
-
-#if FFMPEG_SEEK_TEST_DEBUG == 1
- cout << "\n-- Seek to " << print_time (t, 24) << "\n";
-#endif
-
- p->seek (t, true);
- while (!first_video || !first_audio) {
- p->pass ();
- }
-
-#if FFMPEG_SEEK_TEST_DEBUG == 1
- cout << "First video " << print_time (first_video.get(), 24) << "\n";
- cout << "First audio " << print_time (first_audio.get(), 24) << "\n";
-#endif
-
- /* Outputs should be on or after seek time */
- BOOST_CHECK (first_video.get() >= t);
- BOOST_CHECK (first_audio.get() >= t);
- /* And should be rounded to frame boundaries */
- BOOST_CHECK_EQUAL (first_video.get(), first_video.get().round_up (film->video_frame_rate()));
- BOOST_CHECK_EQUAL (first_audio.get(), first_audio.get().round_up (film->audio_frame_rate()));
-}
-
-/* Test basic seeking */
-BOOST_AUTO_TEST_CASE (ffmpeg_seek_test)
-{
- film = new_test_film ("ffmpeg_seek_test");
- film->set_name ("ffmpeg_seek_test");
- film->set_container (Ratio::from_id ("185"));
- shared_ptr<FFmpegContent> c (new FFmpegContent (film, "test/data/staircase.mov"));
- c->set_scale (VideoContentScale (Ratio::from_id ("185")));
- film->examine_and_add_content (c);
-
- wait_for_jobs ();
-
- shared_ptr<Player> player = film->make_player ();
- player->Video.connect (boost::bind (&process_video, _1, _2, _3, _4, _5));
- player->Audio.connect (boost::bind (&process_audio, _1, _2));
-
- check (player, DCPTime::from_seconds (0));
- check (player, DCPTime::from_seconds (0.1));
- check (player, DCPTime::from_seconds (0.2));
- check (player, DCPTime::from_seconds (0.3));
-}
+++ /dev/null
-/*
- Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
-
-#include <boost/test/unit_test.hpp>
-#include "lib/player.h"
-#include "lib/ffmpeg_decoder.h"
-#include "lib/film.h"
-#include "lib/ratio.h"
-#include "test.h"
-
-using std::cout;
-using std::string;
-using std::stringstream;
-using boost::shared_ptr;
-
-#define LONG_FFMPEG_SEEK_TEST_DEBUG 1
-
-boost::optional<DCPTime> first_video;
-boost::optional<DCPTime> first_audio;
-
-static void
-process_video (shared_ptr<PlayerImage>, Eyes, ColourConversion, bool, DCPTime t)
-{
- if (!first_video) {
- first_video = t;
- }
-}
-
-static void
-process_audio (shared_ptr<const AudioBuffers>, DCPTime t)
-{
- if (!first_audio) {
- first_audio = t;
- }
-}
-
-static string
-print_time (DCPTime t, float fps)
-{
- stringstream s;
- s << t << " " << t.seconds() << "s " << t.frames(fps) << "f";
- return s.str ();
-}
-
-static void
-check (shared_ptr<Player> p, DCPTime t)
-{
- first_video.reset ();
- first_audio.reset ();
-
-#if LONG_FFMPEG_SEEK_TEST_DEBUG == 1
- cout << "\n-- Seek to " << print_time (t, 24) << "\n";
-#endif
-
- p->seek (t, true);
- while (!first_video || !first_audio) {
- p->pass ();
- }
-
-#if LONG_FFMPEG_SEEK_TEST_DEBUG == 1
- cout << "First video " << print_time (first_video.get(), 24) << "\n";
- cout << "First audio " << print_time (first_audio.get(), 24) << "\n";
-#endif
-
- BOOST_CHECK (first_video.get() >= t);
- BOOST_CHECK (first_audio.get() >= t);
-}
-
-BOOST_AUTO_TEST_CASE (long_ffmpeg_seek_test)
-{
- shared_ptr<Film> film = new_test_film ("long_ffmpeg_audio_test");
- film->set_name ("long_ffmpeg_audio_test");
- film->set_container (Ratio::from_id ("185"));
- shared_ptr<FFmpegContent> c (new FFmpegContent (film, "test/long_data/dolby_aurora.vob"));
- c->set_scale (VideoContentScale (Ratio::from_id ("185")));
- film->examine_and_add_content (c);
-
- wait_for_jobs ();
-
- shared_ptr<Player> player = film->make_player ();
- player->Video.connect (boost::bind (&process_video, _1, _2, _3, _4, _5));
- player->Audio.connect (boost::bind (&process_audio, _1, _2));
-
- for (float i = 0; i < 10; i += 0.1) {
- check (player, DCPTime::from_seconds (i));
- }
-}
-
-
+++ /dev/null
-/*
- Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
-
-#include <boost/test/unit_test.hpp>
-#include "lib/player.h"
-#include "lib/ratio.h"
-#include "lib/dcp_content_type.h"
-#include "test.h"
-
-/* This test needs stuff in Player that is only included in debug mode */
-#ifdef DCPOMATIC_DEBUG
-
-using std::cout;
-using boost::optional;
-using boost::shared_ptr;
-
-struct Video
-{
- boost::shared_ptr<Content> content;
- boost::shared_ptr<const Image> image;
- DCPTime time;
-};
-
-class PlayerWrapper
-{
-public:
- PlayerWrapper (shared_ptr<Player> p)
- : _player (p)
- {
- _player->Video.connect (bind (&PlayerWrapper::process_video, this, _1, _2, _5));
- }
-
- void process_video (shared_ptr<PlayerImage> i, bool, DCPTime t)
- {
- Video v;
- v.content = _player->_last_video;
- v.image = i->image (PIX_FMT_RGB24, false);
- v.time = t;
- _queue.push_front (v);
- }
-
- optional<Video> get_video ()
- {
- while (_queue.empty() && !_player->pass ()) {}
- if (_queue.empty ()) {
- return optional<Video> ();
- }
-
- Video v = _queue.back ();
- _queue.pop_back ();
- return v;
- }
-
- void seek (DCPTime t, bool ac)
- {
- _player->seek (t, ac);
- _queue.clear ();
- }
-
-private:
- shared_ptr<Player> _player;
- std::list<Video> _queue;
-};
-
-BOOST_AUTO_TEST_CASE (play_test)
-{
- shared_ptr<Film> film = new_test_film ("play_test");
- film->set_dcp_content_type (DCPContentType::from_dci_name ("FTR"));
- film->set_container (Ratio::from_id ("185"));
- film->set_name ("play_test");
-
- shared_ptr<FFmpegContent> A (new FFmpegContent (film, "test/data/red_24.mp4"));
- film->examine_and_add_content (A);
- wait_for_jobs ();
-
- BOOST_CHECK_EQUAL (A->video_length_after_3d_combine().frames (24), 16);
-
- shared_ptr<FFmpegContent> B (new FFmpegContent (film, "test/data/red_30.mp4"));
- film->examine_and_add_content (B);
- wait_for_jobs ();
-
- BOOST_CHECK_EQUAL (B->video_length_after_3d_combine().frames (30), 16);
-
- /* Film should have been set to 25fps */
- BOOST_CHECK_EQUAL (film->video_frame_rate(), 25);
-
- BOOST_CHECK_EQUAL (A->position(), DCPTime ());
- /* A is 16 frames long at 25 fps */
- BOOST_CHECK_EQUAL (B->position(), DCPTime::from_frames (16, 25));
-
- shared_ptr<Player> player = film->make_player ();
- PlayerWrapper wrap (player);
-
- for (int i = 0; i < 32; ++i) {
- optional<Video> v = wrap.get_video ();
- BOOST_CHECK (v);
- if (i < 16) {
- BOOST_CHECK (v.get().content == A);
- } else {
- BOOST_CHECK (v.get().content == B);
- }
- }
-
- player->seek (DCPTime::from_frames (6, 25), true);
- optional<Video> v = wrap.get_video ();
- BOOST_CHECK (v);
- BOOST_CHECK_EQUAL (v.get().time, DCPTime::from_frames (6, 25));
-}
-
-#endif
#include "lib/ratio.h"
#include "lib/dcp_content_type.h"
#include "lib/ffmpeg_decoder.h"
+#include "lib/content_video.h"
#include "test.h"
using std::cout;
film->examine_and_add_content (content);
wait_for_jobs ();
- FFmpegDecoder decoder (content, film->log(), true, false, false);
- shared_ptr<DecodedVideo> a = dynamic_pointer_cast<DecodedVideo> (decoder.peek ());
- decoder.seek (ContentTime(), true);
- shared_ptr<DecodedVideo> b = dynamic_pointer_cast<DecodedVideo> (decoder.peek ());
-
- /* a will be after no seek, and b after a seek to zero, which should
- have the same effect.
- */
- BOOST_CHECK_EQUAL (a->content_time, b->content_time);
+ FFmpegDecoder decoder (content, film->log());
+ shared_ptr<ContentVideo> a = decoder.get_video (0, true);
+ shared_ptr<ContentVideo> b = decoder.get_video (0, true);
+ BOOST_CHECK_EQUAL (a->frame, 0);
+ BOOST_CHECK_EQUAL (b->frame, 0);
}
shared_ptr<Film> film = new_test_film ("subrip_render_test");
shared_ptr<SubRipDecoder> decoder (new SubRipDecoder (content));
- shared_ptr<DecodedTextSubtitle> dts = dynamic_pointer_cast<DecodedTextSubtitle> (decoder->peek ());
+ list<shared_ptr<ContentTextSubtitle> > cts = decoder->get_text_subtitles (ContentTime::from_seconds (109), ContentTime::from_seconds (110));
+ BOOST_CHECK_EQUAL (cts.size(), 1);
- shared_ptr<Image> image;
- Position<int> position;
- render_subtitles (dts->subs, dcp::Size (1998, 1080), image, position);
- write_image (image, "build/test/subrip_render_test.png");
+ PositionImage image = render_subtitles (cts.front()->subs, dcp::Size (1998, 1080));
+ write_image (image.image, "build/test/subrip_render_test.png");
check_file ("build/test/subrip_render_test.png", "test/data/subrip_render_test.png");
}
audio_analysis_test.cc
audio_delay_test.cc
audio_mapping_test.cc
- audio_merger_test.cc
black_fill_test.cc
client_server_test.cc
colour_conversion_test.cc
ffmpeg_dcp_test.cc
ffmpeg_examiner_test.cc
ffmpeg_pts_offset.cc
- ffmpeg_seek_test.cc
file_group_test.cc
film_metadata_test.cc
frame_rate_test.cc
job_test.cc
make_black_test.cc
pixel_formats_test.cc
- play_test.cc
ratio_test.cc
repeat_frame_test.cc
recover_test.cc
obj.use = 'libdcpomatic'
obj.source = """
test.cc
- long_ffmpeg_seek_test.cc
"""
obj.target = 'long-unit-tests'