+2013-04-21 Carl Hetherington <cth@carlh.net>
+
+ * Version 0.84beta5 released.
+
+2013-04-20 Carl Hetherington <cth@carlh.net>
+
+ * Fix bad saving of metadata in locales which use a comma
+ as the decimal separator (#119).
+
+2013-04-19 Carl Hetherington <cth@carlh.net>
+
+ * Add basic frame index and timecode to viewer, and previous/next
+ frame buttons.
+
+ * Version 0.84beta4 released.
+
+2013-04-19 Carl Hetherington <cth@carlh.net>
+
+ * Version 0.84beta3 released.
+
+2013-04-19 Carl Hetherington <cth@carlh.net>
+
+ * Version 0.84beta2 released.
+
+2013-04-18 Carl Hetherington <cth@carlh.net>
+
+ * Version 0.84beta1 released.
+
2013-04-15 Carl Hetherington <cth@carlh.net>
* Fix error message on forcing language to English (#103).
+ * Fix problems with content whose first audio content
+ comes before the first video (resulting in audio being
+ chopped off at the start of the DCP) (#79).
+
+ * Use true 4:3 rather than 1.33.
+
2013-04-13 Carl Hetherington <cth@carlh.net>
* Use film-name-derived names for MXFs in DCPs (#54).
return ()
else:
return (('openjpeg-cdist', None),
- ('ffmpeg-cdist', '35a110e4270b070146a84a827b10f75c5916e08a'),
+ ('ffmpeg-cdist', '488d5d4496af5e3a3b9d31d6b221e8eeada6b77e'),
('libdcp', 'v0.45'))
def build(env, target):
+++ /dev/null
-import cdist
-
-def builds():
- return ['source']
-
-def build_source():
- cdist.build_source_waf()
-
+dvdomatic (0.84beta5-1) UNRELEASED; urgency=low
+
+ * New upstream release.
+
+ -- Carl Hetherington <carl@houllier.lan> Sun, 21 Apr 2013 00:06:12 +0100
+
+dvdomatic (0.84beta4-1) UNRELEASED; urgency=low
+
+ * New upstream release.
+
+ -- Carl Hetherington <carl@houllier.lan> Fri, 19 Apr 2013 17:41:58 +0100
+
+dvdomatic (0.84beta3-1) UNRELEASED; urgency=low
+
+ * New upstream release.
+
+ -- Carl Hetherington <carl@houllier.lan> Fri, 19 Apr 2013 11:36:37 +0100
+
+dvdomatic (0.84beta2-1) UNRELEASED; urgency=low
+
+ * New upstream release.
+
+ -- Carl Hetherington <carl@houllier.lan> Fri, 19 Apr 2013 11:12:09 +0100
+
+dvdomatic (0.84beta1-1) UNRELEASED; urgency=low
+
+ * New upstream release.
+
+ -- Carl Hetherington <carl@houllier.lan> Thu, 18 Apr 2013 23:32:17 +0100
+
dvdomatic (0.83-1) UNRELEASED; urgency=low
* New upstream release.
_da = decoder_factory (_film_a, o);
_db = decoder_factory (_film_b, o);
- if (_film_a->audio_stream()) {
- shared_ptr<AudioStream> st = _film_a->audio_stream();
- _matcher.reset (new Matcher (_film_a->log(), st->sample_rate(), _film_a->source_frame_rate()));
- _delay_line.reset (new DelayLine (_film_a->log(), st->channels(), _film_a->audio_delay() * st->sample_rate() / 1000));
- _gain.reset (new Gain (_film_a->log(), _film_a->audio_gain()));
- }
+ shared_ptr<AudioStream> st = _film_a->audio_stream();
+ _matcher.reset (new Matcher (_film_a->log(), st->sample_rate(), _film_a->source_frame_rate()));
+ _delay_line.reset (new DelayLine (_film_a->log(), _film_a->audio_delay() / 1000.0f));
+ _gain.reset (new Gain (_film_a->log(), _film_a->audio_gain()));
/* Set up the decoder to use the film's set streams */
_da.video->set_subtitle_stream (_film_a->subtitle_stream ());
_db.video->set_subtitle_stream (_film_a->subtitle_stream ());
_da.audio->set_audio_stream (_film_a->audio_stream ());
- _da.video->Video.connect (bind (&Combiner::process_video, _combiner, _1, _2, _3));
- _db.video->Video.connect (bind (&Combiner::process_video_b, _combiner, _1, _2, _3));
+ _da.video->Video.connect (bind (&Combiner::process_video, _combiner, _1, _2, _3, _4));
+ _db.video->Video.connect (bind (&Combiner::process_video_b, _combiner, _1, _2, _3, _4));
- if (_matcher) {
- _combiner->connect_video (_matcher);
- _matcher->connect_video (_encoder);
- } else {
- _combiner->connect_video (_encoder);
- }
+ _combiner->connect_video (_delay_line);
+ _delay_line->connect_video (_matcher);
+ _matcher->connect_video (_encoder);
- if (_matcher && _delay_line) {
- _da.audio->connect_audio (_delay_line);
- _delay_line->connect_audio (_matcher);
- _matcher->connect_audio (_gain);
- _gain->connect_audio (_encoder);
- }
+ _da.audio->connect_audio (_delay_line);
+ _delay_line->connect_audio (_matcher);
+ _matcher->connect_audio (_gain);
+ _gain->connect_audio (_encoder);
}
void
/** @class AudioDecoder.
* @brief Parent class for audio decoders.
*/
-class AudioDecoder : public AudioSource, public virtual Decoder
+class AudioDecoder : public TimedAudioSource, public virtual Decoder
{
public:
AudioDecoder (boost::shared_ptr<Film>, DecodeOptions);
virtual void process_audio (boost::shared_ptr<AudioBuffers>) = 0;
};
+class TimedAudioSink
+{
+public:
+ /** Call with some audio data */
+ virtual void process_audio (boost::shared_ptr<AudioBuffers>, double t) = 0;
+};
+
#endif
{
Audio.connect (bind (&AudioSink::process_audio, s, _1));
}
+
+void
+TimedAudioSource::connect_audio (shared_ptr<TimedAudioSink> s)
+{
+ Audio.connect (bind (&TimedAudioSink::process_audio, s, _1, _2));
+}
class AudioBuffers;
class AudioSink;
+class TimedAudioSink;
/** A class that emits audio data */
class AudioSource
void connect_audio (boost::shared_ptr<AudioSink>);
};
+
+/** A class that emits audio data with timestamps */
+class TimedAudioSource
+{
+public:
+ /** Emitted when some audio data is ready */
+ boost::signals2::signal<void (boost::shared_ptr<AudioBuffers>, double)> Audio;
+
+ void connect_audio (boost::shared_ptr<TimedAudioSink>);
+};
+
#endif
using boost::shared_ptr;
Combiner::Combiner (shared_ptr<Log> log)
- : VideoProcessor (log)
+ : TimedVideoProcessor (log)
{
}
* @param image Frame image.
*/
void
-Combiner::process_video (shared_ptr<Image> image, bool, shared_ptr<Subtitle>)
+Combiner::process_video (shared_ptr<Image> image, bool, shared_ptr<Subtitle>, double)
{
_image = image;
}
* @param sub Subtitle (which will be put onto the whole frame)
*/
void
-Combiner::process_video_b (shared_ptr<Image> image, bool, shared_ptr<Subtitle> sub)
+Combiner::process_video_b (shared_ptr<Image> image, bool, shared_ptr<Subtitle> sub, double t)
{
/* Copy the right half of this image into our _image */
/* XXX: this should probably be in the Image class */
}
}
- Video (_image, false, sub);
+ Video (_image, false, sub, t);
_image.reset ();
}
* one image used for the left half of the screen and the other for
* the right.
*/
-class Combiner : public VideoProcessor
+class Combiner : public TimedVideoProcessor
{
public:
Combiner (boost::shared_ptr<Log> log);
- void process_video (boost::shared_ptr<Image> i, bool, boost::shared_ptr<Subtitle> s);
- void process_video_b (boost::shared_ptr<Image> i, bool, boost::shared_ptr<Subtitle> s);
+ void process_video (boost::shared_ptr<Image> i, bool, boost::shared_ptr<Subtitle> s, double);
+ void process_video_b (boost::shared_ptr<Image> i, bool, boost::shared_ptr<Subtitle> s, double);
private:
/** The image that we are currently working on */
virtual bool pass () = 0;
virtual bool seek (double);
virtual bool seek_to_last ();
+ virtual void seek_back () {}
+ virtual void seek_forward () {}
boost::signals2::signal<void()> OutputChanged;
using std::min;
using boost::shared_ptr;
-/** @param channels Number of channels of audio.
- * @param frames Delay in frames, +ve to move audio later.
+/** @param seconds Delay in seconds, +ve to move audio later.
*/
-DelayLine::DelayLine (shared_ptr<Log> log, int channels, int frames)
- : AudioProcessor (log)
- , _negative_delay_remaining (0)
- , _frames (frames)
+DelayLine::DelayLine (shared_ptr<Log> log, double seconds)
+ : TimedAudioVideoProcessor (log)
+ , _seconds (seconds)
{
- if (_frames > 0) {
- /* We need a buffer to keep some data in */
- _buffers.reset (new AudioBuffers (channels, _frames));
- _buffers->make_silent ();
- } else if (_frames < 0) {
- /* We can do -ve delays just by chopping off
- the start, so no buffer needed.
- */
- _negative_delay_remaining = -_frames;
- }
+
}
void
-DelayLine::process_audio (shared_ptr<AudioBuffers> data)
+DelayLine::process_audio (shared_ptr<AudioBuffers> data, double t)
{
- if (_buffers) {
- /* We have some buffers, so we are moving the audio later */
-
- /* Copy the input data */
- AudioBuffers input (*data.get ());
-
- int to_do = data->frames ();
-
- /* Write some of our buffer to the output */
- int const from_buffer = min (to_do, _buffers->frames());
- data->copy_from (_buffers.get(), from_buffer, 0, 0);
- to_do -= from_buffer;
-
- /* Write some of the input to the output */
- int const from_input = to_do;
- data->copy_from (&input, from_input, 0, from_buffer);
-
- int const left_in_buffer = _buffers->frames() - from_buffer;
-
- /* Shuffle our buffer down */
- _buffers->move (from_buffer, 0, left_in_buffer);
-
- /* Copy remaining input data to our buffer */
- _buffers->copy_from (&input, input.frames() - from_input, from_input, left_in_buffer);
-
- } else {
+ if (_seconds > 0) {
+ t += _seconds;
+ }
- /* Chop the initial data off until _negative_delay_remaining
- is zero, then just pass data.
- */
+ Audio (data, t);
+}
- int const to_do = min (data->frames(), _negative_delay_remaining);
- if (to_do) {
- data->move (to_do, 0, data->frames() - to_do);
- data->set_frames (data->frames() - to_do);
- _negative_delay_remaining -= to_do;
- }
+void
+DelayLine::process_video (boost::shared_ptr<Image> image, bool same, boost::shared_ptr<Subtitle> sub, double t)
+{
+ if (_seconds < 0) {
+ t += _seconds;
}
- Audio (data);
+ Video (image, same, sub, t);
}
#include <boost/shared_ptr.hpp>
#include "processor.h"
-class AudioBuffers;
-
-/** A delay line for audio */
-class DelayLine : public AudioProcessor
+/** A delay line */
+class DelayLine : public TimedAudioVideoProcessor
{
public:
- DelayLine (boost::shared_ptr<Log> log, int channels, int frames);
+ DelayLine (boost::shared_ptr<Log> log, double);
- void process_audio (boost::shared_ptr<AudioBuffers>);
+ void process_video (boost::shared_ptr<Image>, bool, boost::shared_ptr<Subtitle>, double);
+ void process_audio (boost::shared_ptr<AudioBuffers>, double);
private:
- boost::shared_ptr<AudioBuffers> _buffers;
- int _negative_delay_remaining; ///< number of frames of negative delay that remain to emit
- int _frames;
+ double _seconds;
};
setup_video ();
setup_audio ();
setup_subtitle ();
-
- if (!o.video_sync) {
- _first_video = 0;
- }
}
FFmpegDecoder::~FFmpegDecoder ()
av_strerror (r, buf, sizeof(buf));
_film->log()->log (String::compose (N_("error on av_read_frame (%1) (%2)"), buf, r));
}
-
+
/* Get any remaining frames */
_packet.data = 0;
_packet.size = 0;
-
+
/* XXX: should we reset _packet.data and size after each *_decode_* call? */
-
+
int frame_finished;
-
+
if (_opt.decode_video) {
while (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
- filter_and_emit_video (_frame);
+ filter_and_emit_video ();
}
}
-
+
if (_audio_stream && _opt.decode_audio) {
decode_audio_packet ();
}
-
+
return true;
}
_film->log()->log (String::compose (N_("Used only %1 bytes of %2 in packet"), r, _packet.size));
}
- if (_opt.video_sync) {
- out_with_sync ();
- } else {
- filter_and_emit_video (_frame);
- }
+ filter_and_emit_video ();
}
} else if (ffa && _packet.stream_index == ffa->id() && _opt.decode_audio) {
decode_audio_packet ();
- } else if (_subtitle_stream && _packet.stream_index == _subtitle_stream->id() && _opt.decode_subtitles && _first_video) {
+ } else if (_subtitle_stream && _packet.stream_index == _subtitle_stream->id() && _opt.decode_subtitles) {
int got_subtitle;
AVSubtitle sub;
FFmpegDecoder::stream_name (AVStream* s) const
{
stringstream n;
-
- AVDictionaryEntry const * lang = av_dict_get (s->metadata, N_("language"), 0, 0);
- if (lang) {
- n << lang->value;
- }
-
- AVDictionaryEntry const * title = av_dict_get (s->metadata, N_("title"), 0, 0);
- if (title) {
- if (!n.str().empty()) {
- n << N_(" ");
+
+ if (s->metadata) {
+ AVDictionaryEntry const * lang = av_dict_get (s->metadata, N_("language"), 0, 0);
+ if (lang) {
+ n << lang->value;
+ }
+
+ AVDictionaryEntry const * title = av_dict_get (s->metadata, N_("title"), 0, 0);
+ if (title) {
+ if (!n.str().empty()) {
+ n << N_(" ");
+ }
+ n << title->value;
}
- n << title->value;
}
if (n.str().empty()) {
}
void
-FFmpegDecoder::filter_and_emit_video (AVFrame* frame)
+FFmpegDecoder::filter_and_emit_video ()
{
boost::mutex::scoped_lock lm (_filter_graphs_mutex);
shared_ptr<FilterGraph> graph;
list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
- while (i != _filter_graphs.end() && !(*i)->can_process (libdcp::Size (frame->width, frame->height), (AVPixelFormat) frame->format)) {
+ while (i != _filter_graphs.end() && !(*i)->can_process (libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
++i;
}
if (i == _filter_graphs.end ()) {
- graph.reset (new FilterGraph (_film, this, libdcp::Size (frame->width, frame->height), (AVPixelFormat) frame->format));
+ graph.reset (new FilterGraph (_film, this, libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
_filter_graphs.push_back (graph);
- _film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), frame->width, frame->height, frame->format));
+ _film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format));
} else {
graph = *i;
}
- list<shared_ptr<Image> > images = graph->process (frame);
+ list<shared_ptr<Image> > images = graph->process (_frame);
for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {
- emit_video (*i, frame_time ());
+ int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
+ if (bet != AV_NOPTS_VALUE) {
+ emit_video (*i, false, bet * av_q2d (_format_context->streams[_video_stream]->time_base));
+ } else {
+ _film->log()->log ("Dropping frame without PTS");
+ }
}
}
bool
FFmpegDecoder::seek (double p)
{
- return do_seek (p, false);
+ return do_seek (p, false, false);
}
bool
(used when we change decoder parameters and want to re-fetch the frame) we end up going forwards rather than
staying in the same place.
*/
- return do_seek (last_source_time(), true);
+ return do_seek (last_source_time(), true, false);
+}
+
+void
+FFmpegDecoder::seek_back ()
+{
+ do_seek (last_source_time() - 2.5 / frames_per_second (), true, true);
+}
+
+void
+FFmpegDecoder::seek_forward ()
+{
+ do_seek (last_source_time() - 0.5 / frames_per_second(), true, true);
}
bool
-FFmpegDecoder::do_seek (double p, bool backwards)
+FFmpegDecoder::do_seek (double p, bool backwards, bool accurate)
{
int64_t const vt = p / av_q2d (_format_context->streams[_video_stream]->time_base);
int const r = av_seek_frame (_format_context, _video_stream, vt, backwards ? AVSEEK_FLAG_BACKWARD : 0);
-
+
avcodec_flush_buffers (_video_codec_context);
if (_subtitle_codec_context) {
avcodec_flush_buffers (_subtitle_codec_context);
}
-
+
+ if (accurate) {
+ while (1) {
+ int r = av_read_frame (_format_context, &_packet);
+ if (r < 0) {
+ return true;
+ }
+
+ avcodec_get_frame_defaults (_frame);
+
+ if (_packet.stream_index == _video_stream) {
+ int finished = 0;
+ int const r = avcodec_decode_video2 (_video_codec_context, _frame, &finished, &_packet);
+ if (r >= 0 && finished) {
+ int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
+ if (bet > vt) {
+ break;
+ }
+ }
+ }
+
+ av_free_packet (&_packet);
+ }
+ }
+
return r < 0;
}
return String::compose (N_("ffmpeg %1 %2 %3 %4"), _id, _sample_rate, _channel_layout, _name);
}
-void
-FFmpegDecoder::out_with_sync ()
-{
- /* Where we are in the output, in seconds */
- double const out_pts_seconds = video_frame() / frames_per_second();
-
- /* Where we are in the source, in seconds */
- double const source_pts_seconds = av_q2d (_format_context->streams[_packet.stream_index]->time_base)
- * av_frame_get_best_effort_timestamp(_frame);
-
- _film->log()->log (
- String::compose (N_("Source video frame ready; source at %1, output at %2"), source_pts_seconds, out_pts_seconds),
- Log::VERBOSE
- );
-
- if (!_first_video) {
- _first_video = source_pts_seconds;
- }
-
- /* Difference between where we are and where we should be */
- double const delta = source_pts_seconds - _first_video.get() - out_pts_seconds;
- double const one_frame = 1 / frames_per_second();
-
- /* Insert frames if required to get out_pts_seconds up to pts_seconds */
- if (delta > one_frame) {
- int const extra = rint (delta / one_frame);
- for (int i = 0; i < extra; ++i) {
- repeat_last_video ();
- _film->log()->log (
- String::compose (
- N_("Extra video frame inserted at %1s; source frame %2, source PTS %3 (at %4 fps)"),
- out_pts_seconds, video_frame(), source_pts_seconds, frames_per_second()
- )
- );
- }
- }
-
- if (delta > -one_frame) {
- /* Process this frame */
- filter_and_emit_video (_frame);
- } else {
- /* Otherwise we are omitting a frame to keep things right */
- _film->log()->log (String::compose (N_("Frame removed at %1s"), out_pts_seconds));
- }
-}
-
void
FFmpegDecoder::film_changed (Film::Property p)
{
return (double(_format_context->duration) / AV_TIME_BASE) * frames_per_second();
}
-double
-FFmpegDecoder::frame_time () const
-{
- return av_frame_get_best_effort_timestamp(_frame) * av_q2d (_format_context->streams[_video_stream]->time_base);
-}
-
void
FFmpegDecoder::decode_audio_packet ()
{
int frame_finished;
int const decode_result = avcodec_decode_audio4 (_audio_codec_context, _frame, &frame_finished, ©_packet);
- if (decode_result >= 0 && frame_finished) {
-
- /* Where we are in the source, in seconds */
- double const source_pts_seconds = av_q2d (_format_context->streams[copy_packet.stream_index]->time_base)
- * av_frame_get_best_effort_timestamp(_frame);
-
- /* We only decode audio if we've had our first video packet through, and if it
- was before this packet. Until then audio is thrown away.
- */
+ if (decode_result >= 0) {
+ if (frame_finished) {
- if ((_first_video && _first_video.get() <= source_pts_seconds) || !_opt.decode_video) {
-
- if (!_first_audio && _opt.decode_video) {
- _first_audio = source_pts_seconds;
-
- /* This is our first audio frame, and if we've arrived here we must have had our
- first video frame. Push some silence to make up any gap between our first
- video frame and our first audio.
- */
-
- /* frames of silence that we must push */
- int const s = rint ((_first_audio.get() - _first_video.get()) * ffa->sample_rate ());
-
- _film->log()->log (
- String::compose (
- N_("First video at %1, first audio at %2, pushing %3 audio frames of silence for %4 channels (%5 bytes per sample)"),
- _first_video.get(), _first_audio.get(), s, ffa->channels(), bytes_per_audio_sample()
- )
- );
-
- if (s) {
- shared_ptr<AudioBuffers> audio (new AudioBuffers (ffa->channels(), s));
- audio->make_silent ();
- Audio (audio);
- }
- }
+ /* Where we are in the source, in seconds */
+ double const source_pts_seconds = av_q2d (_format_context->streams[copy_packet.stream_index]->time_base)
+ * av_frame_get_best_effort_timestamp(_frame);
int const data_size = av_samples_get_buffer_size (
0, _audio_codec_context->channels, _frame->nb_samples, audio_sample_format (), 1
);
assert (_audio_codec_context->channels == _film->audio_channels());
- Audio (deinterleave_audio (_frame->data, data_size));
+ Audio (deinterleave_audio (_frame->data, data_size), source_pts_seconds);
}
- }
-
- if (decode_result >= 0) {
+
copy_packet.data += decode_result;
copy_packet.size -= decode_result;
}
bool seek (double);
bool seek_to_last ();
+ void seek_forward ();
+ void seek_back ();
private:
bool pass ();
- bool do_seek (double p, bool);
+ bool do_seek (double p, bool, bool);
PixelFormat pixel_format () const;
AVSampleFormat audio_sample_format () const;
int bytes_per_audio_sample () const;
- void out_with_sync ();
- void filter_and_emit_video (AVFrame *);
- double frame_time () const;
+ void filter_and_emit_video ();
void setup_general ();
void setup_video ();
AVPacket _packet;
- boost::optional<double> _first_video;
- boost::optional<double> _first_audio;
-
std::list<boost::shared_ptr<FilterGraph> > _filter_graphs;
boost::mutex _filter_graphs_mutex;
};
if (must_exist) {
read_metadata ();
+ } else {
+ write_metadata ();
}
_log.reset (new FileLog (file ("log")));
Film::video_state_identifier () const
{
assert (format ());
+ LocaleGuard lg;
pair<string, string> f = Filter::ffmpeg_strings (filters());
Film::write_metadata () const
{
boost::mutex::scoped_lock lm (_state_mutex);
+ LocaleGuard lg;
boost::filesystem::create_directories (directory());
Film::read_metadata ()
{
boost::mutex::scoped_lock lm (_state_mutex);
+ LocaleGuard lg;
_external_audio.clear ();
_content_audio_streams.clear ();
s << _nickname << N_(" (");
}
- s << setprecision(3) << (_ratio / 100.0) << N_(":1");
+ s << setprecision(3) << _ratio << N_(":1");
if (!_nickname.empty ()) {
s << N_(")");
{
/// TRANSLATORS: these are film picture aspect ratios; "Academy" means 1.37, "Flat" 1.85 and "Scope" 2.39.
_formats.push_back (
- new FixedFormat (119, libdcp::Size (1285, 1080), N_("119"), _("1.19"), N_("F")
+ new FixedFormat (1.19, libdcp::Size (1285, 1080), N_("119"), _("1.19"), N_("F")
));
_formats.push_back (
- new FixedFormat (133, libdcp::Size (1436, 1080), N_("133"), _("1.33"), N_("F")
+ new FixedFormat (4.0 / 3.0, libdcp::Size (1436, 1080), N_("133"), _("4:3"), N_("F")
));
_formats.push_back (
- new FixedFormat (138, libdcp::Size (1485, 1080), N_("138"), _("1.375"), N_("F")
+ new FixedFormat (1.38, libdcp::Size (1485, 1080), N_("138"), _("1.375"), N_("F")
));
_formats.push_back (
- new FixedFormat (133, libdcp::Size (1998, 1080), N_("133-in-flat"), _("4:3 within Flat"), N_("F")
+ new FixedFormat (4.0 / 3.0, libdcp::Size (1998, 1080), N_("133-in-flat"), _("4:3 within Flat"), N_("F")
));
_formats.push_back (
- new FixedFormat (137, libdcp::Size (1480, 1080), N_("137"), _("Academy"), N_("F")
+ new FixedFormat (1.37, libdcp::Size (1480, 1080), N_("137"), _("Academy"), N_("F")
));
_formats.push_back (
- new FixedFormat (166, libdcp::Size (1793, 1080), N_("166"), _("1.66"), N_("F")
+ new FixedFormat (1.66, libdcp::Size (1793, 1080), N_("166"), _("1.66"), N_("F")
));
_formats.push_back (
- new FixedFormat (166, libdcp::Size (1998, 1080), N_("166-in-flat"), _("1.66 within Flat"), N_("F")
+ new FixedFormat (1.66, libdcp::Size (1998, 1080), N_("166-in-flat"), _("1.66 within Flat"), N_("F")
));
_formats.push_back (
- new FixedFormat (178, libdcp::Size (1998, 1080), N_("178-in-flat"), _("16:9 within Flat"), N_("F")
+ new FixedFormat (1.78, libdcp::Size (1998, 1080), N_("178-in-flat"), _("16:9 within Flat"), N_("F")
));
_formats.push_back (
- new FixedFormat (178, libdcp::Size (1920, 1080), N_("178"), _("16:9"), N_("F")
+ new FixedFormat (1.78, libdcp::Size (1920, 1080), N_("178"), _("16:9"), N_("F")
));
_formats.push_back (
- new FixedFormat (185, libdcp::Size (1998, 1080), N_("185"), _("Flat"), N_("F")
+ new FixedFormat (1.85, libdcp::Size (1998, 1080), N_("185"), _("Flat"), N_("F")
));
_formats.push_back (
- new FixedFormat (178, libdcp::Size (2048, 858), N_("178-in-scope"), _("16:9 within Scope"), N_("S")
+ new FixedFormat (1.78, libdcp::Size (2048, 858), N_("178-in-scope"), _("16:9 within Scope"), N_("S")
));
_formats.push_back (
- new FixedFormat (239, libdcp::Size (2048, 858), N_("239"), _("Scope"), N_("S")
+ new FixedFormat (2.39, libdcp::Size (2048, 858), N_("239"), _("Scope"), N_("S")
));
_formats.push_back (
return _formats;
}
-/** @param r Ratio multiplied by 100 (e.g. 185)
+/** @param r Ratio
* @param dcp Size (in pixels) of the images that we should put in a DCP.
* @param id ID (e.g. 185)
* @param n Nick name (e.g. Flat)
*/
-FixedFormat::FixedFormat (int r, libdcp::Size dcp, string id, string n, string d)
+FixedFormat::FixedFormat (float r, libdcp::Size dcp, string id, string n, string d)
: Format (dcp, id, n, d)
, _ratio (r)
{
int
Format::dcp_padding (shared_ptr<const Film> f) const
{
- int p = rint ((_dcp_size.width - (_dcp_size.height * ratio_as_float(f))) / 2.0);
+ int p = rint ((_dcp_size.width - (_dcp_size.height * ratio(f))) / 2.0);
/* This comes out -ve for Scope; bodge it */
if (p < 0) {
}
float
-Format::container_ratio_as_float () const
+Format::container_ratio () const
{
return static_cast<float> (_dcp_size.width) / _dcp_size.height;
}
}
-int
-VariableFormat::ratio_as_integer (shared_ptr<const Film> f) const
-{
- return rint (ratio_as_float (f) * 100);
-}
-
float
-VariableFormat::ratio_as_float (shared_ptr<const Film> f) const
+VariableFormat::ratio (shared_ptr<const Film> f) const
{
libdcp::Size const c = f->cropped_size (f->size ());
return float (c.width) / c.height;
, _dci_name (d)
{}
- /** @return the aspect ratio multiplied by 100
- * (e.g. 239 for Cinemascope 2.39:1)
- */
- virtual int ratio_as_integer (boost::shared_ptr<const Film> f) const = 0;
-
- /** @return the ratio as a floating point number */
- virtual float ratio_as_float (boost::shared_ptr<const Film> f) const = 0;
-
- /** @return the ratio of the container (including any padding) as a floating point number */
- float container_ratio_as_float () const;
+ /** @return the ratio of the container (including any padding) */
+ float container_ratio () const;
int dcp_padding (boost::shared_ptr<const Film> f) const;
static void setup_formats ();
protected:
+ /** @return the ratio */
+ virtual float ratio (boost::shared_ptr<const Film> f) const = 0;
+
/** libdcp::Size in pixels of the images that we should
* put in a DCP for this ratio. This size will not correspond
* to the ratio when we are doing things like 16:9 in a Flat frame.
class FixedFormat : public Format
{
public:
- FixedFormat (int, libdcp::Size, std::string, std::string, std::string);
+ FixedFormat (float, libdcp::Size, std::string, std::string, std::string);
- int ratio_as_integer (boost::shared_ptr<const Film>) const {
+ float ratio (boost::shared_ptr<const Film>) const {
return _ratio;
}
- float ratio_as_float (boost::shared_ptr<const Film>) const {
- return _ratio / 100.0;
- }
-
std::string name () const;
private:
- /** Ratio expressed as the actual ratio multiplied by 100 */
- int _ratio;
+ float _ratio;
};
class VariableFormat : public Format
public:
VariableFormat (libdcp::Size, std::string, std::string, std::string);
- int ratio_as_integer (boost::shared_ptr<const Film> f) const;
- float ratio_as_float (boost::shared_ptr<const Film> f) const;
+ float ratio (boost::shared_ptr<const Film> f) const;
std::string name () const;
};
return true;
}
- repeat_last_video ();
+ emit_video (_image, true, double (video_frame()) / frames_per_second());
return false;
}
delete magick_image;
- image = image->crop (_film->crop(), true);
-
- emit_video (image, 0);
+ _image = image->crop (_film->crop(), true);
+
+ emit_video (_image, false, double (video_frame()) / frames_per_second());
++_iter;
return false;
OutputChanged ();
}
}
+
+float
+ImageMagickDecoder::frames_per_second () const
+{
+ return _film->source_frame_rate ();
+}
public:
ImageMagickDecoder (boost::shared_ptr<Film>, DecodeOptions);
- float frames_per_second () const {
- /* We don't know */
- return 0;
- }
+ float frames_per_second () const;
libdcp::Size native_size () const;
std::list<std::string> _files;
std::list<std::string>::iterator _iter;
+
+ boost::shared_ptr<Image> _image;
};
set_state (FINISHED_ERROR);
string m = String::compose (_("An error occurred whilst handling the file %1."), boost::filesystem::path (e.filename()).leaf());
-
- boost::filesystem::space_info const s = boost::filesystem::space (e.filename());
- if (s.available < pow (1024, 3)) {
- m += N_("\n\n");
- m += _("The drive that the film is stored on is low in disc space. Free some more space and try again.");
+
+ try {
+ boost::filesystem::space_info const s = boost::filesystem::space (e.filename());
+ if (s.available < pow (1024, 3)) {
+ m += N_("\n\n");
+ m += _("The drive that the film is stored on is low in disc space. Free some more space and try again.");
+ }
+ } catch (...) {
+
}
set_error (e.what(), m);
#include "i18n.h"
using std::min;
+using std::cout;
+using std::list;
using boost::shared_ptr;
Matcher::Matcher (shared_ptr<Log> log, int sample_rate, float frames_per_second)
- : AudioVideoProcessor (log)
+ : Processor (log)
, _sample_rate (sample_rate)
, _frames_per_second (frames_per_second)
, _video_frames (0)
, _audio_frames (0)
+ , _had_first_video (false)
+ , _had_first_audio (false)
{
}
void
-Matcher::process_video (boost::shared_ptr<Image> i, bool same, boost::shared_ptr<Subtitle> s)
+Matcher::process_video (boost::shared_ptr<Image> image, bool same, boost::shared_ptr<Subtitle> sub, double t)
{
- Video (i, same, s);
- _video_frames++;
+ _pixel_format = image->pixel_format ();
+ _size = image->size ();
- _pixel_format = i->pixel_format ();
- _size = i->size ();
+ _log->log(String::compose("Matcher video @ %1 [audio=%2, video=%3, pending_audio=%4]", t, _audio_frames, _video_frames, _pending_audio.size()));
+
+ if (!_first_input) {
+ _first_input = t;
+ }
+
+ bool const this_is_first_video = !_had_first_video;
+ _had_first_video = true;
+
+ if (this_is_first_video && _had_first_audio) {
+ /* First video since we got audio */
+ fix_start (t);
+ }
+
+ /* Video before audio is fine, since we can make up an arbitrary difference
+ with audio samples (contrasting with video which is quantised to frames)
+ */
+
+ /* Difference between where this video is and where it should be */
+ double const delta = t - _first_input.get() - _video_frames / _frames_per_second;
+ double const one_frame = 1 / _frames_per_second;
+
+ if (delta > one_frame) {
+ /* Insert frames to make up the difference */
+ int const extra = rint (delta / one_frame);
+ for (int i = 0; i < extra; ++i) {
+ repeat_last_video ();
+ _log->log (String::compose ("Extra video frame inserted at %1s", _video_frames / _frames_per_second));
+ }
+ }
+
+ if (delta > -one_frame) {
+ Video (image, same, sub);
+ ++_video_frames;
+ } else {
+ /* We are omitting a frame to keep things right */
+ _log->log (String::compose ("Frame removed at %1s", t));
+ }
+
+ _last_image = image;
+ _last_subtitle = sub;
}
void
-Matcher::process_audio (boost::shared_ptr<AudioBuffers> b)
+Matcher::process_audio (boost::shared_ptr<AudioBuffers> b, double t)
{
- Audio (b);
- _audio_frames += b->frames ();
-
_channels = b->channels ();
+
+ _log->log (String::compose ("Matcher audio @ %1 [video=%2, audio=%3, pending_audio=%4]", t, _video_frames, _audio_frames, _pending_audio.size()));
+
+ if (!_first_input) {
+ _first_input = t;
+ }
+
+ bool const this_is_first_audio = _had_first_audio;
+ _had_first_audio = true;
+
+ if (!_had_first_video) {
+ /* No video yet; we must postpone these data until we have some */
+ _pending_audio.push_back (AudioRecord (b, t));
+ } else if (this_is_first_audio && !_had_first_video) {
+ /* First audio since we got video */
+ _pending_audio.push_back (AudioRecord (b, t));
+ fix_start (_first_input.get ());
+ } else {
+ /* Normal running. We assume audio time stamps are consecutive */
+ Audio (b);
+ _audio_frames += b->frames ();
+ }
}
void
/* We won't do anything */
return;
}
+
+ _log->log (String::compose ("Matcher has seen %1 video frames (which equals %2 audio frames) and %3 audio frames",
+ _video_frames, video_frames_to_audio_frames (_video_frames, _sample_rate, _frames_per_second), _audio_frames));
- int64_t audio_short_by_frames = video_frames_to_audio_frames (_video_frames, _sample_rate, _frames_per_second) - _audio_frames;
-
- _log->log (
- String::compose (
- N_("Matching processor has seen %1 video frames (which equals %2 audio frames) and %3 audio frames"),
- _video_frames,
- video_frames_to_audio_frames (_video_frames, _sample_rate, _frames_per_second),
- _audio_frames
- )
- );
+ match ((double (_audio_frames) / _sample_rate) - (double (_video_frames) / _frames_per_second));
+}
+
+void
+Matcher::fix_start (double first_video)
+{
+ assert (!_pending_audio.empty ());
+
+ _log->log (String::compose ("Fixing start; video at %1, audio at %2", first_video, _pending_audio.front().time));
+
+ match (first_video - _pending_audio.front().time);
+
+ for (list<AudioRecord>::iterator i = _pending_audio.begin(); i != _pending_audio.end(); ++i) {
+ process_audio (i->audio, i->time);
+ }
- if (audio_short_by_frames < 0) {
-
- _log->log (String::compose (N_("%1 too many audio frames"), -audio_short_by_frames));
-
- /* We have seen more audio than video. Emit enough black video frames so that we reverse this */
- int const black_video_frames = ceil (-audio_short_by_frames * _frames_per_second / _sample_rate);
+ _pending_audio.clear ();
+}
+
+void
+Matcher::match (double extra_video_needed)
+{
+ _log->log (String::compose ("Match %1", extra_video_needed));
+
+ if (extra_video_needed > 0) {
+
+ /* Emit black video frames */
+ int const black_video_frames = ceil (extra_video_needed * _frames_per_second);
+
_log->log (String::compose (N_("Emitting %1 frames of black video"), black_video_frames));
shared_ptr<Image> black (new SimpleImage (_pixel_format.get(), _size.get(), true));
black->make_black ();
for (int i = 0; i < black_video_frames; ++i) {
Video (black, i != 0, shared_ptr<Subtitle>());
+ ++_video_frames;
}
-
- /* Now recompute our check value */
- audio_short_by_frames = video_frames_to_audio_frames (_video_frames, _sample_rate, _frames_per_second) - _audio_frames;
+
+ extra_video_needed -= black_video_frames / _frames_per_second;
}
-
- if (audio_short_by_frames > 0) {
- _log->log (String::compose (N_("Emitted %1 too few audio frames"), audio_short_by_frames));
+
+ if (extra_video_needed < 0) {
+
+ /* Emit silence */
+
+ int64_t to_do = -extra_video_needed * _sample_rate;
+ _log->log (String::compose (N_("Emitting %1 frames of silence"), to_do));
/* Do things in half second blocks as I think there may be limits
to what FFmpeg (and in particular the resampler) can cope with.
shared_ptr<AudioBuffers> b (new AudioBuffers (_channels.get(), block));
b->make_silent ();
- int64_t to_do = audio_short_by_frames;
while (to_do > 0) {
int64_t const this_time = min (to_do, block);
b->set_frames (this_time);
}
}
}
+
+void
+Matcher::repeat_last_video ()
+{
+ if (!_last_image) {
+ _last_image.reset (new SimpleImage (_pixel_format.get(), _size.get(), true));
+ _last_image->make_black ();
+ }
+
+ Video (_last_image, true, _last_subtitle);
+ ++_video_frames;
+}
+
#include "processor.h"
#include "ffmpeg_compatibility.h"
-class Matcher : public AudioVideoProcessor
+class Matcher : public Processor, public TimedAudioSink, public TimedVideoSink, public AudioSource, public VideoSource
{
public:
Matcher (boost::shared_ptr<Log> log, int sample_rate, float frames_per_second);
- void process_video (boost::shared_ptr<Image> i, bool, boost::shared_ptr<Subtitle> s);
- void process_audio (boost::shared_ptr<AudioBuffers>);
+ void process_video (boost::shared_ptr<Image> i, bool, boost::shared_ptr<Subtitle> s, double);
+ void process_audio (boost::shared_ptr<AudioBuffers>, double);
void process_end ();
private:
+ void fix_start (double);
+ void match (double);
+ void repeat_last_video ();
+
int _sample_rate;
float _frames_per_second;
int _video_frames;
boost::optional<AVPixelFormat> _pixel_format;
boost::optional<libdcp::Size> _size;
boost::optional<int> _channels;
+
+ struct AudioRecord {
+ AudioRecord (boost::shared_ptr<AudioBuffers> a, double t)
+ : audio (a)
+ , time (t)
+ {}
+
+ boost::shared_ptr<AudioBuffers> audio;
+ double time;
+ };
+
+ std::list<AudioRecord> _pending_audio;
+
+ boost::optional<double> _first_input;
+ boost::shared_ptr<Image> _last_image;
+ boost::shared_ptr<Subtitle> _last_subtitle;
+
+ bool _had_first_video;
+ bool _had_first_audio;
};
{}
};
+class TimedAudioVideoProcessor : public Processor, public TimedVideoSource, public TimedVideoSink, public TimedAudioSource, public TimedAudioSink
+{
+public:
+ TimedAudioVideoProcessor (boost::shared_ptr<Log> log)
+ : Processor (log)
+ {}
+};
+
+
/** @class AudioProcessor
* @brief A processor which handles just audio data.
*/
{}
};
+class TimedVideoProcessor : public Processor, public TimedVideoSource, public TimedVideoSink
+{
+public:
+ TimedVideoProcessor (boost::shared_ptr<Log> log)
+ : Processor (log)
+ {}
+};
+
#endif
to what FFmpeg (and in particular the resampler) can cope with.
*/
sf_count_t const block = _audio_stream->sample_rate() / 2;
-
shared_ptr<AudioBuffers> audio (new AudioBuffers (_audio_stream->channels(), block));
+ sf_count_t done = 0;
while (frames > 0) {
sf_count_t const this_time = min (block, frames);
for (size_t i = 0; i < sndfiles.size(); ++i) {
}
audio->set_frames (this_time);
- Audio (audio);
+ Audio (audio, double(done) / _audio_stream->sample_rate());
+ done += this_time;
frames -= this_time;
}
{
assert (_encoder);
- if (f->audio_stream()) {
- shared_ptr<AudioStream> st = f->audio_stream();
- _matcher.reset (new Matcher (f->log(), st->sample_rate(), f->source_frame_rate()));
- _delay_line.reset (new DelayLine (f->log(), st->channels(), f->audio_delay() * st->sample_rate() / 1000));
- _gain.reset (new Gain (f->log(), f->audio_gain()));
- }
+ shared_ptr<AudioStream> st = f->audio_stream();
+ _matcher.reset (new Matcher (f->log(), st->sample_rate(), f->source_frame_rate()));
+ _delay_line.reset (new DelayLine (f->log(), f->audio_delay() / 1000.0f));
+ _gain.reset (new Gain (f->log(), f->audio_gain()));
/* Set up the decoder to use the film's set streams */
_decoders.video->set_subtitle_stream (f->subtitle_stream ());
- if (_decoders.audio) {
- _decoders.audio->set_audio_stream (f->audio_stream ());
- }
+ _decoders.audio->set_audio_stream (f->audio_stream ());
- if (_matcher) {
- _decoders.video->connect_video (_matcher);
- _matcher->connect_video (_encoder);
- } else {
- _decoders.video->connect_video (_encoder);
- }
+ _decoders.video->connect_video (_delay_line);
+ _delay_line->connect_video (_matcher);
+ _matcher->connect_video (_encoder);
- if (_matcher && _delay_line && _decoders.audio) {
- _decoders.audio->connect_audio (_delay_line);
- _delay_line->connect_audio (_matcher);
- _matcher->connect_audio (_gain);
- _gain->connect_audio (_encoder);
- }
+ _decoders.audio->connect_audio (_delay_line);
+ _delay_line->connect_audio (_matcher);
+ _matcher->connect_audio (_gain);
+ _gain->connect_audio (_encoder);
}
/** Run the decoder, passing its output to the encoder, until the decoder
throw;
}
- if (_delay_line) {
- _delay_line->process_end ();
- }
- if (_matcher) {
- _matcher->process_end ();
- }
- if (_gain) {
- _gain->process_end ();
- }
+ _delay_line->process_end ();
+ _matcher->process_end ();
+ _gain->process_end ();
_encoder->process_end ();
}
}
}
}
+
+LocaleGuard::LocaleGuard ()
+ : _old (0)
+{
+ char const * old = setlocale (LC_NUMERIC, 0);
+
+ if (old) {
+ _old = strdup (old);
+ if (strcmp (_old, "POSIX")) {
+ setlocale (LC_NUMERIC, "POSIX");
+ }
+ }
+}
+
+LocaleGuard::~LocaleGuard ()
+{
+ setlocale (LC_NUMERIC, _old);
+ free (_old);
+}
extern bool still_image_file (std::string);
extern std::pair<std::string, int> cpu_info ();
+class LocaleGuard
+{
+public:
+ LocaleGuard ();
+ ~LocaleGuard ();
+
+private:
+ char* _old;
+};
+
+
#endif
#include "i18n.h"
+using std::cout;
using boost::shared_ptr;
using boost::optional;
* @param t Time of the frame within the source, in seconds.
*/
void
-VideoDecoder::emit_video (shared_ptr<Image> image, double t)
+VideoDecoder::emit_video (shared_ptr<Image> image, bool same, double t)
{
shared_ptr<Subtitle> sub;
if (_timed_subtitle && _timed_subtitle->displayed_at (t)) {
sub = _timed_subtitle->subtitle ();
}
- signal_video (image, false, sub);
- _last_source_time = t;
-}
-
-/** Called by subclasses to repeat the last video frame that we
- * passed to emit_video(). If emit_video hasn't yet been called,
- * we will generate a black frame.
- */
-void
-VideoDecoder::repeat_last_video ()
-{
- if (!_last_image) {
- _last_image.reset (new SimpleImage (pixel_format(), native_size(), true));
- _last_image->make_black ();
- }
-
- signal_video (_last_image, true, _last_subtitle);
-}
-
-/** Emit our signal to say that some video data is ready.
- * @param image Video frame.
- * @param same true if `image' is the same as the last one we emitted.
- * @param sub Subtitle for this frame, or 0.
- */
-void
-VideoDecoder::signal_video (shared_ptr<Image> image, bool same, shared_ptr<Subtitle> sub)
-{
- TIMING (N_("Decoder emits %1"), _video_frame);
- Video (image, same, sub);
+ Video (image, same, sub, t);
++_video_frame;
-
- _last_image = image;
- _last_subtitle = sub;
+
+ _last_source_time = t;
}
/** Set up the current subtitle. This will be put onto frames that
#include "stream.h"
#include "decoder.h"
-class VideoDecoder : public VideoSource, public virtual Decoder
+class VideoDecoder : public TimedVideoSource, public virtual Decoder
{
public:
VideoDecoder (boost::shared_ptr<Film>, DecodeOptions);
virtual PixelFormat pixel_format () const = 0;
- void emit_video (boost::shared_ptr<Image>, double);
+ void emit_video (boost::shared_ptr<Image>, bool, double);
void emit_subtitle (boost::shared_ptr<TimedSubtitle>);
- void repeat_last_video ();
/** Subtitle stream to use when decoding */
boost::shared_ptr<SubtitleStream> _subtitle_stream;
std::vector<boost::shared_ptr<SubtitleStream> > _subtitle_streams;
private:
- void signal_video (boost::shared_ptr<Image>, bool, boost::shared_ptr<Subtitle>);
-
int _video_frame;
double _last_source_time;
boost::shared_ptr<TimedSubtitle> _timed_subtitle;
-
- boost::shared_ptr<Image> _last_image;
- boost::shared_ptr<Subtitle> _last_subtitle;
};
#endif
virtual void process_video (boost::shared_ptr<Image> i, bool same, boost::shared_ptr<Subtitle> s) = 0;
};
+class TimedVideoSink
+{
+public:
+ /** Call with a frame of video.
+ * @param i Video frame image.
+ * @param same true if i is the same as last time we were called.
+ * @param s A subtitle that should be on this frame, or 0.
+ * @param t Source timestamp.
+ */
+ virtual void process_video (boost::shared_ptr<Image> i, bool same, boost::shared_ptr<Subtitle> s, double t) = 0;
+};
+
#endif
{
Video.connect (bind (&VideoSink::process_video, s, _1, _2, _3));
}
+
+void
+TimedVideoSource::connect_video (shared_ptr<TimedVideoSink> s)
+{
+ Video.connect (bind (&TimedVideoSink::process_video, s, _1, _2, _3, _4));
+}
#include "util.h"
class VideoSink;
+class TimedVideoSink;
class Subtitle;
class Image;
-/** @class VideoSink
- * @param A class that emits video data.
+/** @class VideoSource
+ * @brief A class that emits video data without timestamps.
*/
class VideoSource
{
void connect_video (boost::shared_ptr<VideoSink>);
};
+/** @class TimedVideoSource
+ * @brief A class that emits video data with timestamps.
+ */
+class TimedVideoSource
+{
+public:
+
+ /** Emitted when a video frame is ready.
+ * First parameter is the video image.
+ * Second parameter is true if the image is the same as the last one that was emitted.
+ * Third parameter is either 0 or a subtitle that should be on this frame.
+ * Fourth parameter is the source timestamp of this frame.
+ */
+ boost::signals2::signal<void (boost::shared_ptr<Image>, bool, boost::shared_ptr<Subtitle>, double)> Video;
+
+ void connect_video (boost::shared_ptr<TimedVideoSink>);
+};
+
#endif
: wxPanel (p)
, _panel (new wxPanel (this))
, _slider (new wxSlider (this, wxID_ANY, 0, 0, 4096))
+ , _back_button (new wxButton (this, wxID_ANY, wxT("<")))
+ , _forward_button (new wxButton (this, wxID_ANY, wxT(">")))
+ , _frame (new wxStaticText (this, wxID_ANY, wxT("")))
+ , _timecode (new wxStaticText (this, wxID_ANY, wxT("")))
, _play_button (new wxToggleButton (this, wxID_ANY, _("Play")))
, _display_frame_x (0)
, _got_frame (false)
_v_sizer->Add (_panel, 1, wxEXPAND);
wxBoxSizer* h_sizer = new wxBoxSizer (wxHORIZONTAL);
+
+ wxBoxSizer* time_sizer = new wxBoxSizer (wxVERTICAL);
+ time_sizer->Add (_frame, 0, wxEXPAND);
+ time_sizer->Add (_timecode, 0, wxEXPAND);
+
+ h_sizer->Add (_back_button, 0, wxALL, 2);
+ h_sizer->Add (time_sizer, 0, wxEXPAND);
+ h_sizer->Add (_forward_button, 0, wxALL, 2);
h_sizer->Add (_play_button, 0, wxEXPAND);
h_sizer->Add (_slider, 1, wxEXPAND);
_v_sizer->Add (h_sizer, 0, wxEXPAND | wxALL, 6);
+ _frame->SetMinSize (wxSize (84, -1));
+ _back_button->SetMinSize (wxSize (32, -1));
+ _forward_button->SetMinSize (wxSize (32, -1));
+
_panel->Connect (wxID_ANY, wxEVT_PAINT, wxPaintEventHandler (FilmViewer::paint_panel), 0, this);
_panel->Connect (wxID_ANY, wxEVT_SIZE, wxSizeEventHandler (FilmViewer::panel_sized), 0, this);
_slider->Connect (wxID_ANY, wxEVT_SCROLL_THUMBTRACK, wxScrollEventHandler (FilmViewer::slider_moved), 0, this);
_slider->Connect (wxID_ANY, wxEVT_SCROLL_PAGEDOWN, wxScrollEventHandler (FilmViewer::slider_moved), 0, this);
_play_button->Connect (wxID_ANY, wxEVT_COMMAND_TOGGLEBUTTON_CLICKED, wxCommandEventHandler (FilmViewer::play_clicked), 0, this);
_timer.Connect (wxID_ANY, wxEVT_TIMER, wxTimerEventHandler (FilmViewer::timer), 0, this);
+ _back_button->Connect (wxID_ANY, wxEVT_COMMAND_BUTTON_CLICKED, wxCommandEventHandler (FilmViewer::back_clicked), 0, this);
+ _forward_button->Connect (wxID_ANY, wxEVT_COMMAND_BUTTON_CLICKED, wxCommandEventHandler (FilmViewer::forward_clicked), 0, this);
set_film (f);
if (_decoders.video == 0) {
break;
}
- _decoders.video->Video.connect (bind (&FilmViewer::process_video, this, _1, _2, _3));
+ _decoders.video->Video.connect (bind (&FilmViewer::process_video, this, _1, _2, _3, _4));
_decoders.video->OutputChanged.connect (boost::bind (&FilmViewer::decoder_changed, this));
_decoders.video->set_subtitle_stream (_film->subtitle_stream());
calculate_sizes ();
Format const * format = _film->format ();
float const panel_ratio = static_cast<float> (_panel_size.width) / _panel_size.height;
- float const film_ratio = format ? format->container_ratio_as_float () : 1.78;
+ float const film_ratio = format ? format->container_ratio () : 1.78;
if (panel_ratio < film_ratio) {
/* panel is less widscreen than the film; clamp width */
}
void
-FilmViewer::process_video (shared_ptr<Image> image, bool, shared_ptr<Subtitle> sub)
+FilmViewer::process_video (shared_ptr<Image> image, bool, shared_ptr<Subtitle> sub, double t)
{
_raw_frame = image;
_raw_sub = sub;
raw_to_display ();
_got_frame = true;
+
+ double const fps = _decoders.video->frames_per_second ();
+ _frame->SetLabel (wxString::Format (wxT("%d"), int (rint (t * fps))));
+
+ double w = t;
+ int const h = (w / 3600);
+ w -= h * 3600;
+ int const m = (w / 60);
+ w -= m * 60;
+ int const s = floor (w);
+ w -= s;
+ int const f = rint (w * fps);
+ _timecode->SetLabel (wxString::Format (wxT("%02d:%02d:%02d:%02d"), h, m, s, f));
}
void
_play_button->Enable (!a);
}
+void
+FilmViewer::back_clicked (wxCommandEvent &)
+{
+ if (!_decoders.video) {
+ return;
+ }
+
+ _decoders.video->seek_back ();
+ get_frame ();
+ _panel->Refresh ();
+ _panel->Update ();
+}
+
+void
+FilmViewer::forward_clicked (wxCommandEvent &)
+{
+ if (!_decoders.video) {
+ return;
+ }
+
+ _decoders.video->seek_forward ();
+ get_frame ();
+ _panel->Refresh ();
+ _panel->Update ();
+}
void slider_moved (wxScrollEvent &);
void play_clicked (wxCommandEvent &);
void timer (wxTimerEvent &);
- void process_video (boost::shared_ptr<Image>, bool, boost::shared_ptr<Subtitle>);
+ void process_video (boost::shared_ptr<Image>, bool, boost::shared_ptr<Subtitle>, double);
void calculate_sizes ();
void check_play_state ();
void update_from_raw ();
void raw_to_display ();
void get_frame ();
void active_jobs_changed (bool);
+ void back_clicked (wxCommandEvent &);
+ void forward_clicked (wxCommandEvent &);
boost::shared_ptr<Film> _film;
wxSizer* _v_sizer;
wxPanel* _panel;
wxSlider* _slider;
+ wxButton* _back_button;
+ wxButton* _forward_button;
+ wxStaticText* _frame;
+ wxStaticText* _timecode;
wxToggleButton* _play_button;
wxTimer _timer;
#include "job_manager.h"
#include "util.h"
#include "exceptions.h"
-#include "delay_line.h"
#include "image.h"
#include "log.h"
#include "dcp_video_frame.h"
Format const * f = Format::from_nickname ("Flat");
BOOST_CHECK (f);
- BOOST_CHECK_EQUAL (f->ratio_as_integer(shared_ptr<const Film> ()), 185);
+ BOOST_CHECK_EQUAL (f->dcp_size().width, 1998);
+ BOOST_CHECK_EQUAL (f->dcp_size().height, 1080);
f = Format::from_nickname ("Scope");
BOOST_CHECK (f);
- BOOST_CHECK_EQUAL (f->ratio_as_integer(shared_ptr<const Film> ()), 239);
+ BOOST_CHECK_EQUAL (f->dcp_size().width, 2048);
+ BOOST_CHECK_EQUAL (f->dcp_size().height, 858);
}
/* Test VariableFormat-based scaling of content */
void do_log (string) {}
};
-void
-do_positive_delay_line_test (int delay_length, int data_length)
-{
- shared_ptr<NullLog> log (new NullLog);
-
- DelayLine d (log, 6, delay_length);
- shared_ptr<AudioBuffers> data (new AudioBuffers (6, data_length));
-
- int in = 0;
- int out = 0;
- int returned = 0;
- int zeros = 0;
-
- for (int i = 0; i < 64; ++i) {
- for (int j = 0; j < data_length; ++j) {
- for (int c = 0; c < 6; ++c ) {
- data->data(c)[j] = in;
- ++in;
- }
- }
-
- /* This only works because the delay line modifies the parameter */
- d.process_audio (data);
- returned += data->frames ();
-
- for (int j = 0; j < data->frames(); ++j) {
- if (zeros < delay_length) {
- for (int c = 0; c < 6; ++c) {
- BOOST_CHECK_EQUAL (data->data(c)[j], 0);
- }
- ++zeros;
- } else {
- for (int c = 0; c < 6; ++c) {
- BOOST_CHECK_EQUAL (data->data(c)[j], out);
- ++out;
- }
- }
- }
- }
-
- BOOST_CHECK_EQUAL (returned, 64 * data_length);
-}
-
-void
-do_negative_delay_line_test (int delay_length, int data_length)
-{
- shared_ptr<NullLog> log (new NullLog);
-
- DelayLine d (log, 6, delay_length);
- shared_ptr<AudioBuffers> data (new AudioBuffers (6, data_length));
-
- int in = 0;
- int out = -delay_length * 6;
- int returned = 0;
-
- for (int i = 0; i < 256; ++i) {
- data->set_frames (data_length);
- for (int j = 0; j < data_length; ++j) {
- for (int c = 0; c < 6; ++c) {
- data->data(c)[j] = in;
- ++in;
- }
- }
-
- /* This only works because the delay line modifies the parameter */
- d.process_audio (data);
- returned += data->frames ();
-
- for (int j = 0; j < data->frames(); ++j) {
- for (int c = 0; c < 6; ++c) {
- BOOST_CHECK_EQUAL (data->data(c)[j], out);
- ++out;
- }
- }
- }
-
- returned += -delay_length;
- BOOST_CHECK_EQUAL (returned, 256 * data_length);
-}
-
-BOOST_AUTO_TEST_CASE (delay_line_test)
-{
- do_positive_delay_line_test (64, 128);
- do_positive_delay_line_test (128, 64);
- do_positive_delay_line_test (3, 512);
- do_positive_delay_line_test (512, 3);
-
- do_positive_delay_line_test (0, 64);
-
- do_negative_delay_line_test (-64, 128);
- do_negative_delay_line_test (-128, 64);
- do_negative_delay_line_test (-3, 512);
- do_negative_delay_line_test (-512, 3);
-}
-
BOOST_AUTO_TEST_CASE (md5_digest_test)
{
string const t = md5_digest ("test/md5.test");
SetOutPath "$INSTDIR\bin"
File "%deps%/bin/asdcp-libdcp.dll"
-File "%deps%/bin/avcodec-54.dll"
+File "%deps%/bin/avcodec-55.dll"
File "%deps%/bin/avfilter-3.dll"
-File "%deps%/bin/avformat-54.dll"
+File "%deps%/bin/avformat-55.dll"
File "%deps%/bin/avutil-52.dll"
File "%deps%/bin/dcp.dll"
File "%deps%/bin/libintl-8.dll"
SetOutPath "$INSTDIR\bin"
File "%deps%/bin/asdcp-libdcp.dll"
-File "%deps%/bin/avcodec-54.dll"
+File "%deps%/bin/avcodec-55.dll"
File "%deps%/bin/avfilter-3.dll"
-File "%deps%/bin/avformat-54.dll"
+File "%deps%/bin/avformat-55.dll"
File "%deps%/bin/avutil-52.dll"
File "%deps%/bin/dcp.dll"
File "%deps%/bin/libintl-8.dll"
import sys
APPNAME = 'dvdomatic'
-VERSION = '0.84pre'
+VERSION = '0.84beta5'
def options(opt):
opt.load('compiler_cxx')