*/
#include "audio_analysis.h"
+#include "audio_buffers.h"
#include "analyse_audio_job.h"
#include "compose.hpp"
#include "film.h"
player->Audio.connect (bind (&AnalyseAudioJob::audio, this, _1, _2));
- _samples_per_point = max (int64_t (1), _film->time_to_audio_frames (_film->length()) / _num_points);
+ int64_t const len = _film->length().frames (_film->audio_frame_rate());
+ _samples_per_point = max (int64_t (1), len / _num_points);
_current.resize (_film->audio_channels ());
_analysis.reset (new AudioAnalysis (_film->audio_channels ()));
_done = 0;
- AudioFrame const len = _film->time_to_audio_frames (_film->length ());
while (!player->pass ()) {
set_progress (double (_done) / len);
}
#include "job.h"
#include "audio_analysis.h"
#include "types.h"
+#include "dcpomatic_time.h"
class AudioBuffers;
class AudioContent;
void audio (boost::shared_ptr<const AudioBuffers>, DCPTime);
boost::weak_ptr<AudioContent> _content;
- AudioFrame _done;
+ int64_t _done;
int64_t _samples_per_point;
std::vector<AudioPoint> _current;
{
return String::compose ("audio: channels %1, length %2, raw rate %3, out rate %4", audio_channels(), audio_length(), content_audio_frame_rate(), output_audio_frame_rate());
}
-
-/** Note: this is not particularly fast, as the FrameRateChange lookup
- * is not very intelligent.
- *
- * @param t Some duration to convert.
- * @param at The time within the DCP to get the active frame rate change from; i.e. a point at which
- * the `controlling' video content is active.
- */
-AudioFrame
-AudioContent::time_to_content_audio_frames (DCPTime t, DCPTime at) const
-{
- shared_ptr<const Film> film = _film.lock ();
- assert (film);
-
- /* Consider the case where we're running a 25fps video at 24fps (i.e. slow)
- Our audio is at 44.1kHz. We will resample it to 48000 * 25 / 24 and then
- run it at 48kHz (i.e. slow, to match).
-
- After 1 second, we'll have run the equivalent of 44.1kHz * 24 / 25 samples
- in the source.
- */
-
- return rint (t * content_audio_frame_rate() * film->active_frame_rate_change(at).speed_up / TIME_HZ);
-}
std::string technical_summary () const;
virtual int audio_channels () const = 0;
- virtual AudioFrame audio_length () const = 0;
+ virtual ContentTime audio_length () const = 0;
virtual int content_audio_frame_rate () const = 0;
virtual int output_audio_frame_rate () const = 0;
virtual AudioMapping audio_mapping () const = 0;
return _audio_delay;
}
- Frame time_to_content_audio_frames (DCPTime, DCPTime) const;
-
private:
/** Gain to apply to audio in dB */
float _audio_gain;
if (!_audio_position) {
shared_ptr<const Film> film = _film.lock ();
assert (film);
- FrameRateChange frc = film->active_frame_rate_change (_audio_content->position ());
- _audio_position = (double (time) / frc.speed_up) * film->audio_frame_rate() / TIME_HZ;
+ _audio_position = time;
}
- _pending.push_back (shared_ptr<DecodedAudio> (new DecodedAudio (data, _audio_position.get ())));
- _audio_position = _audio_position.get() + data->frames ();
+ _pending.push_back (shared_ptr<DecodedAudio> (new DecodedAudio (_audio_position.get (), data)));
+ _audio_position = _audio_position.get() + ContentTime (data->frames (), _audio_content->output_audio_frame_rate ());
}
void
shared_ptr<const AudioBuffers> b = _resampler->flush ();
if (b) {
- _pending.push_back (shared_ptr<DecodedAudio> (new DecodedAudio (b, _audio_position.get ())));
- _audio_position = _audio_position.get() + b->frames ();
+ _pending.push_back (shared_ptr<DecodedAudio> (new DecodedAudio (_audio_position.get (), b)));
+ _audio_position = _audio_position.get() + ContentTime (b->frames (), _audio_content->output_audio_frame_rate ());
}
}
boost::shared_ptr<const AudioContent> _audio_content;
boost::shared_ptr<Resampler> _resampler;
- boost::optional<AudioFrame> _audio_position;
+ boost::optional<ContentTime> _audio_position;
};
#endif
--- /dev/null
+/*
+ Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#include "audio_buffers.h"
+#include "audio_merger.h"
+
+using std::min;
+using std::max;
+using boost::shared_ptr;
+
/** @param channels Number of audio channels to merge.
 *  @param frame_rate Audio sampling rate, in frames per second.
 */
AudioMerger::AudioMerger (int channels, int frame_rate)
	: _buffers (new AudioBuffers (channels, 0))
	, _frame_rate (frame_rate)
	, _last_pull (0)
{

}
+
+
/** Pull audio data up to a given time.
 *
 *  @param time Time to pull up to; must not be before the time of the last pull.
 *  @return Audio covering the period from the last pull time up to `time',
 *  timestamped at the last pull time.  Any part of that period for which no
 *  data was pushed is returned as silence.  After this call no more data can
 *  be pushed before `time'.
 */
TimedAudioBuffers<DCPTime>
AudioMerger::pull (DCPTime time)
{
	assert (time >= _last_pull);

	TimedAudioBuffers<DCPTime> out;

	/* Total number of frames to return for the period (_last_pull, time] */
	int64_t const to_return = DCPTime (time - _last_pull).frames (_frame_rate);
	out.audio.reset (new AudioBuffers (_buffers->channels(), to_return));
	/* And this is how many we will get from our buffer */
	int64_t const to_return_from_buffers = min (to_return, int64_t (_buffers->frames ()));

	/* Copy the data that we have to the back end of the return buffer */
	out.audio->copy_from (_buffers.get(), to_return_from_buffers, 0, to_return - to_return_from_buffers);
	/* Silence any gap at the start */
	out.audio->make_silent (0, to_return - to_return_from_buffers);

	out.time = _last_pull;
	_last_pull = time;

	/* And remove the data we're returning from our buffers */
	if (_buffers->frames() > to_return_from_buffers) {
		_buffers->move (to_return_from_buffers, 0, _buffers->frames() - to_return_from_buffers);
	}
	_buffers->set_frames (_buffers->frames() - to_return_from_buffers);

	return out;
}
+
/** Push some data into the merger, mixing it with whatever is already there.
 *  @param audio Buffers to mix in.
 *  @param time Time of the start of `audio'; must not be before the time of
 *  the last pull (data before that has already been handed out).
 */
void
AudioMerger::push (shared_ptr<const AudioBuffers> audio, DCPTime time)
{
	assert (time >= _last_pull);

	/* Absolute frame index at which the pushed data starts */
	int64_t frame = time.frames (_frame_rate);
	/* Number of frames our buffer must hold after this push; it may already
	   hold more than the pushed data requires.
	*/
	int64_t after = max (int64_t (_buffers->frames()), frame + audio->frames() - _last_pull.frames (_frame_rate));
	_buffers->ensure_size (after);
	/* Mix the new data in at its offset relative to the last pull time */
	_buffers->accumulate_frames (audio.get(), 0, frame - _last_pull.frames (_frame_rate), audio->frames ());
	_buffers->set_frames (after);
}
+
+TimedAudioBuffers<DCPTime>
+AudioMerger::flush ()
+{
+ if (_buffers->frames() == 0) {
+ return TimedAudioBuffers<DCPTime> ();
+ }
+
+ return TimedAudioBuffers<DCPTime> (_buffers, _last_pull);
+}
+
+void
+AudioMerger::clear (DCPTime t)
+{
+ _last_pull = t;
+ _buffers.reset (new AudioBuffers (_buffers->channels(), 0));
+}
/*
- Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
*/
-#include "audio_buffers.h"
#include "util.h"
-template <class T, class F>
+class AudioBuffers;
+
class AudioMerger
{
public:
- AudioMerger (int channels, boost::function<F (T)> t_to_f, boost::function<T (F)> f_to_t)
- : _buffers (new AudioBuffers (channels, 0))
- , _last_pull (0)
- , _t_to_f (t_to_f)
- , _f_to_t (f_to_t)
- {}
+ AudioMerger (int channels, int frame_rate);
/** Pull audio up to a given time; after this call, no more data can be pushed
* before the specified time.
*/
- TimedAudioBuffers<T>
- pull (T time)
- {
- assert (time >= _last_pull);
-
- TimedAudioBuffers<T> out;
-
- F const to_return = _t_to_f (time - _last_pull);
- out.audio.reset (new AudioBuffers (_buffers->channels(), to_return));
- /* And this is how many we will get from our buffer */
- F const to_return_from_buffers = min (to_return, _buffers->frames ());
-
- /* Copy the data that we have to the back end of the return buffer */
- out.audio->copy_from (_buffers.get(), to_return_from_buffers, 0, to_return - to_return_from_buffers);
- /* Silence any gap at the start */
- out.audio->make_silent (0, to_return - to_return_from_buffers);
-
- out.time = _last_pull;
- _last_pull = time;
-
- /* And remove the data we're returning from our buffers */
- if (_buffers->frames() > to_return_from_buffers) {
- _buffers->move (to_return_from_buffers, 0, _buffers->frames() - to_return_from_buffers);
- }
- _buffers->set_frames (_buffers->frames() - to_return_from_buffers);
-
- return out;
- }
-
- void
- push (boost::shared_ptr<const AudioBuffers> audio, T time)
- {
- assert (time >= _last_pull);
-
- F frame = _t_to_f (time);
- F after = max (_buffers->frames(), frame + audio->frames() - _t_to_f (_last_pull));
- _buffers->ensure_size (after);
- _buffers->accumulate_frames (audio.get(), 0, frame - _t_to_f (_last_pull), audio->frames ());
- _buffers->set_frames (after);
- }
-
- F min (F a, int b)
- {
- if (a < b) {
- return a;
- }
-
- return b;
- }
-
- F max (int a, F b)
- {
- if (a > b) {
- return a;
- }
-
- return b;
- }
-
- TimedAudioBuffers<T>
- flush ()
- {
- if (_buffers->frames() == 0) {
- return TimedAudioBuffers<T> ();
- }
-
- return TimedAudioBuffers<T> (_buffers, _last_pull);
- }
-
- void
- clear (DCPTime t)
- {
- _last_pull = t;
- _buffers.reset (new AudioBuffers (_buffers->channels(), 0));
- }
+ TimedAudioBuffers<DCPTime> pull (DCPTime time);
+ void push (boost::shared_ptr<const AudioBuffers> audio, DCPTime time);
+ TimedAudioBuffers<DCPTime> flush ();
+ void clear (DCPTime t);
private:
boost::shared_ptr<AudioBuffers> _buffers;
- T _last_pull;
- boost::function<F (T)> _t_to_f;
- boost::function<T (F)> _f_to_t;
+ int _frame_rate;
+ DCPTime _last_pull;
};
_paths.push_back ((*i)->content ());
}
_digest = node->string_child ("Digest");
- _position = node->number_child<DCPTime> ("Position");
- _trim_start = node->number_child<DCPTime> ("TrimStart");
- _trim_end = node->number_child<DCPTime> ("TrimEnd");
+ _position = DCPTime (node->number_child<double> ("Position"));
+ _trim_start = DCPTime (node->number_child<double> ("TrimStart"));
+ _trim_end = DCPTime (node->number_child<double> ("TrimEnd"));
}
Content::Content (shared_ptr<const Film> f, vector<shared_ptr<Content> > c)
node->add_child("Path")->add_child_text (i->string ());
}
node->add_child("Digest")->add_child_text (_digest);
- node->add_child("Position")->add_child_text (lexical_cast<string> (_position));
- node->add_child("TrimStart")->add_child_text (lexical_cast<string> (_trim_start));
- node->add_child("TrimEnd")->add_child_text (lexical_cast<string> (_trim_end));
+ node->add_child("Position")->add_child_text (lexical_cast<string> (_position.get ()));
+ node->add_child("TrimStart")->add_child_text (lexical_cast<string> (_trim_start.get ()));
+ node->add_child("TrimEnd")->add_child_text (lexical_cast<string> (_trim_end.get ()));
}
void
#include <boost/enable_shared_from_this.hpp>
#include <libxml++/libxml++.h>
#include "types.h"
+#include "dcpomatic_time.h"
namespace cxml {
class Node;
}
DCPTime end () const {
- return position() + length_after_trim() - 1;
+ return position() + length_after_trim();
}
DCPTime length_after_trim () const;
--- /dev/null
+/*
+ Copyright (C) 2014 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#include "dcpomatic_time.h"
+
/** Construct a ContentTime from a DCPTime and the frame rate change being
 *  applied to the content.  Running content faster than its native rate
 *  (speed_up > 1) means a given span of DCP time covers more content time,
 *  hence the multiplication by speed_up (the inverse of the DCPTime
 *  (ContentTime, FrameRateChange) constructor).
 *  @param d DCP time.
 *  @param f Active frame rate change.
 */
ContentTime::ContentTime (DCPTime d, FrameRateChange f)
	: Time (rint (d.get() * f.speed_up))
{

}
+
+DCPTime min (DCPTime a, DCPTime b)
+{
+ if (a < b) {
+ return a;
+ }
+
+ return b;
+}
--- /dev/null
+/*
+ Copyright (C) 2014 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#ifndef DCPOMATIC_TIME_H
+#define DCPOMATIC_TIME_H
+
+#include <cmath>
+#include <stdint.h>
+#include "frame_rate_change.h"
+
class dcpomatic_round_up_test;

class Time;

/** A time in seconds, expressed as a number scaled up by Time::HZ. */
class Time
{
public:
	Time ()
		: _t (0)
	{}

	explicit Time (int64_t t)
		: _t (t)
	{}

	virtual ~Time () {}

	/** @return Raw value, in 1/HZ second ticks */
	int64_t get () const {
		return _t;
	}

	/** @return This time in seconds */
	double seconds () const {
		return double (_t) / HZ;
	}

	/** @param r Sampling rate, in frames per second.
	 *  @return This time expressed as a number of frames at `r', rounded to
	 *  the nearest frame.
	 */
	template <typename T>
	int64_t frames (T r) const {
		/* Divide by HZ in floating point: with an integral `r' the old
		   expression (_t * r / HZ) truncated before rint() ever saw it,
		   so rounding never happened.
		*/
		return rint (_t * r / double (HZ));
	}

	/** @return true if this time is non-zero */
	operator bool () const {
		return _t != 0;
	}

protected:
	/* Must match the forward declaration above (was mistakenly declared
	   as dcptime_round_up_test).
	*/
	friend class dcpomatic_round_up_test;

	int64_t _t;
	/** Tick frequency: the number of units of _t per second */
	static const int HZ = 96000;
};
+
class DCPTime;

/** A time on the timeline of a piece of source content */
class ContentTime : public Time
{
public:
	ContentTime () : Time () {}
	/** @param t Raw value in 1/HZ second ticks */
	explicit ContentTime (int64_t t) : Time (t) {}
	/** Construct from a number of frames at a given frame rate.
	 *  @param n Frame count.
	 *  @param d Frame rate (frames per second).
	 */
	ContentTime (int64_t n, int64_t d) : Time (n * HZ / d) {}
	/** Construct from a DCP time and the active frame rate change; defined in the .cc */
	ContentTime (DCPTime d, FrameRateChange f);

	bool operator< (ContentTime const & o) const {
		return _t < o._t;
	}

	bool operator<= (ContentTime const & o) const {
		return _t <= o._t;
	}

	bool operator== (ContentTime const & o) const {
		return _t == o._t;
	}

	bool operator!= (ContentTime const & o) const {
		return _t != o._t;
	}

	bool operator>= (ContentTime const & o) const {
		return _t >= o._t;
	}

	bool operator> (ContentTime const & o) const {
		return _t > o._t;
	}

	ContentTime operator+ (ContentTime const & o) const {
		return ContentTime (_t + o._t);
	}

	ContentTime & operator+= (ContentTime const & o) {
		_t += o._t;
		return *this;
	}

	ContentTime operator- (ContentTime const & o) const {
		return ContentTime (_t - o._t);
	}

	ContentTime & operator-= (ContentTime const & o) {
		_t -= o._t;
		return *this;
	}

	/** @param s A number of seconds */
	static ContentTime from_seconds (double s) {
		return ContentTime (s * HZ);
	}

	/** @param f A number of frames.
	 *  @param r Frame rate (frames per second).
	 */
	template <class T>
	static ContentTime from_frames (int64_t f, T r) {
		return ContentTime (f * HZ / r);
	}
};
+
+class DCPTime : public Time
+{
+public:
+ DCPTime () : Time () {}
+ explicit DCPTime (int64_t t) : Time (t) {}
+ DCPTime (ContentTime t, FrameRateChange c) : Time (rint (t.get() / c.speed_up)) {}
+
+ bool operator< (DCPTime const & o) const {
+ return _t < o._t;
+ }
+
+ bool operator<= (DCPTime const & o) const {
+ return _t <= o._t;
+ }
+
+ bool operator== (DCPTime const & o) const {
+ return _t == o._t;
+ }
+
+ bool operator!= (DCPTime const & o) const {
+ return _t != o._t;
+ }
+
+ bool operator>= (DCPTime const & o) const {
+ return _t >= o._t;
+ }
+
+ bool operator> (DCPTime const & o) const {
+ return _t > o._t;
+ }
+
+ DCPTime operator+ (DCPTime const & o) const {
+ return DCPTime (_t + o._t);
+ }
+
+ DCPTime & operator+= (DCPTime const & o) {
+ _t += o._t;
+ return *this;
+ }
+
+ DCPTime operator- (DCPTime const & o) const {
+ return DCPTime (_t - o._t);
+ }
+
+ DCPTime & operator-= (DCPTime const & o) {
+ _t -= o._t;
+ return *this;
+ }
+
+ /** Round up to the nearest sampling interval
+ * at some sampling rate.
+ * @param r Sampling rate.
+ */
+ DCPTime round_up (int r) {
+ int64_t const n = HZ / r;
+ int64_t const a = _t + n - 1;
+ return DCPTime (a - (a % n));
+ }
+
+ DCPTime abs () const {
+ return DCPTime (std::abs (_t));
+ }
+
+ static DCPTime from_seconds (double s) {
+ return DCPTime (s * HZ);
+ }
+
+ template <class T>
+ static DCPTime from_frames (int64_t f, T r) {
+ return DCPTime (f * HZ / r);
+ }
+
+ static DCPTime delta () {
+ return DCPTime (1);
+ }
+
+ static DCPTime max () {
+ return DCPTime (INT64_MAX);
+ }
+};
+
+DCPTime min (DCPTime a, DCPTime b);
+
+#endif
{
public:
Decoded ()
- : dcp_time (0)
+ : content_time (0)
+ , dcp_time (0)
+ {}
+
+ Decoded (ContentTime t)
+ : content_time (t)
+ , dcp_time (0)
{}
virtual ~Decoded () {}
- virtual void set_dcp_times (VideoFrame, AudioFrame, FrameRateChange, DCPTime) = 0;
+ virtual void set_dcp_times (FrameRateChange frc, DCPTime offset)
+ {
+ dcp_time = DCPTime (content_time, frc) + offset;
+ }
+ ContentTime content_time;
DCPTime dcp_time;
};
DecodedVideo ()
: eyes (EYES_BOTH)
, same (false)
- , frame (0)
{}
- DecodedVideo (boost::shared_ptr<const Image> im, Eyes e, bool s, VideoFrame f)
- : image (im)
+ DecodedVideo (ContentTime t, boost::shared_ptr<const Image> im, Eyes e, bool s)
+ : Decoded (t)
+ , image (im)
, eyes (e)
, same (s)
- , frame (f)
{}
- void set_dcp_times (VideoFrame video_frame_rate, AudioFrame, FrameRateChange frc, DCPTime offset)
- {
- dcp_time = frame * TIME_HZ * frc.factor() / video_frame_rate + offset;
- }
-
boost::shared_ptr<const Image> image;
Eyes eyes;
bool same;
- VideoFrame frame;
};
class DecodedAudio : public Decoded
{
public:
- DecodedAudio (boost::shared_ptr<const AudioBuffers> d, AudioFrame f)
- : data (d)
- , frame (f)
+ DecodedAudio (ContentTime t, boost::shared_ptr<const AudioBuffers> d)
+ : Decoded (t)
+ , data (d)
{}
-
- void set_dcp_times (VideoFrame, AudioFrame audio_frame_rate, FrameRateChange, DCPTime offset)
- {
- dcp_time = frame * TIME_HZ / audio_frame_rate + offset;
- }
boost::shared_ptr<const AudioBuffers> data;
- AudioFrame frame;
};
class DecodedImageSubtitle : public Decoded
{
public:
DecodedImageSubtitle ()
- : content_time (0)
- , content_time_to (0)
+ : content_time_to (0)
, dcp_time_to (0)
{}
- DecodedImageSubtitle (boost::shared_ptr<Image> im, dcpomatic::Rect<double> r, ContentTime f, ContentTime t)
- : image (im)
- , rect (r)
- , content_time (f)
+ DecodedImageSubtitle (ContentTime f, ContentTime t, boost::shared_ptr<Image> im, dcpomatic::Rect<double> r)
+ : Decoded (f)
, content_time_to (t)
, dcp_time_to (0)
+ , image (im)
+ , rect (r)
{}
- void set_dcp_times (VideoFrame, AudioFrame, FrameRateChange frc, DCPTime offset)
+ void set_dcp_times (FrameRateChange frc, DCPTime offset)
{
- dcp_time = rint (content_time / frc.speed_up) + offset;
- dcp_time_to = rint (content_time_to / frc.speed_up) + offset;
+ Decoded::set_dcp_times (frc, offset);
+ dcp_time_to = DCPTime (content_time_to, frc) + offset;
}
- boost::shared_ptr<Image> image;
- dcpomatic::Rect<double> rect;
- ContentTime content_time;
ContentTime content_time_to;
DCPTime dcp_time_to;
+ boost::shared_ptr<Image> image;
+ dcpomatic::Rect<double> rect;
};
class DecodedTextSubtitle : public Decoded
{
public:
DecodedTextSubtitle ()
- : dcp_time_to (0)
+ : content_time_to (0)
+ , dcp_time_to (0)
{}
+ /* Assuming that all subs are at the same time */
DecodedTextSubtitle (std::list<dcp::SubtitleString> s)
- : subs (s)
- {}
-
- void set_dcp_times (VideoFrame, AudioFrame, FrameRateChange frc, DCPTime offset)
+ : Decoded (ContentTime::from_seconds (s.front().in().to_ticks() * 4 / 1000.0))
+ , content_time_to (ContentTime::from_seconds (s.front().out().to_ticks() * 4 / 1000.0))
+ , subs (s)
{
- if (subs.empty ()) {
- return;
- }
+
+ }
- /* Assuming that all subs are at the same time */
- dcp_time = rint (subs.front().in().to_ticks() * 4 * TIME_HZ / frc.speed_up) + offset;
- dcp_time_to = rint (subs.front().out().to_ticks() * 4 * TIME_HZ / frc.speed_up) + offset;
+ void set_dcp_times (FrameRateChange frc, DCPTime offset)
+ {
+ Decoded::set_dcp_times (frc, offset);
+ dcp_time_to = DCPTime (content_time_to, frc) + offset;
}
- std::list<dcp::SubtitleString> subs;
+ ContentTime content_time_to;
DCPTime dcp_time_to;
+ std::list<dcp::SubtitleString> subs;
};
#endif
#include <boost/weak_ptr.hpp>
#include <boost/utility.hpp>
#include "types.h"
+#include "dcpomatic_time.h"
class Film;
class Decoded;
shared_ptr<FFmpegExaminer> examiner (new FFmpegExaminer (shared_from_this ()));
- VideoFrame video_length = 0;
- video_length = examiner->video_length ();
- film->log()->log (String::compose ("Video length obtained from header as %1 frames", video_length));
+ ContentTime video_length = examiner->video_length ();
+ film->log()->log (String::compose ("Video length obtained from header as %1 frames", video_length.frames (video_frame_rate ())));
{
boost::mutex::scoped_lock lm (_mutex);
string
FFmpegContent::information () const
{
- if (video_length() == 0 || video_frame_rate() == 0) {
+ if (video_length() == ContentTime (0) || video_frame_rate() == 0) {
return "";
}
signal_changed (FFmpegContentProperty::AUDIO_STREAM);
}
-AudioFrame
+ContentTime
FFmpegContent::audio_length () const
{
- int const cafr = content_audio_frame_rate ();
- int const vfr = video_frame_rate ();
- VideoFrame const vl = video_length ();
-
- boost::mutex::scoped_lock lm (_mutex);
- if (!_audio_stream) {
- return 0;
+ {
+ boost::mutex::scoped_lock lm (_mutex);
+ if (!_audio_stream) {
+ return ContentTime ();
+ }
}
-
- return video_frames_to_audio_frames (vl, cafr, vfr);
+
+ return video_length();
}
int
{
shared_ptr<const Film> film = _film.lock ();
assert (film);
-
- FrameRateChange frc (video_frame_rate (), film->video_frame_rate ());
- return video_length() * frc.factor() * TIME_HZ / film->video_frame_rate ();
+ return DCPTime (video_length(), FrameRateChange (video_frame_rate (), film->video_frame_rate ()));
}
AudioMapping
/* AudioContent */
int audio_channels () const;
- AudioFrame audio_length () const;
+ ContentTime audio_length () const;
int content_audio_frame_rate () const;
int output_audio_frame_rate () const;
AudioMapping audio_mapping () const;
int finished = 0;
r = avcodec_decode_video2 (video_codec_context(), _frame, &finished, &_packet);
if (r >= 0 && finished) {
- last_video = rint (
- (av_frame_get_best_effort_timestamp (_frame) * time_base + _pts_offset) * TIME_HZ
- );
+ last_video = ContentTime::from_seconds (av_frame_get_best_effort_timestamp (_frame) * time_base + _pts_offset);
}
} else if (_ffmpeg_content->audio_stream() && _ffmpeg_content->audio_stream()->uses_index (_format_context, _packet.stream_index)) {
int finished;
r = avcodec_decode_audio4 (audio_codec_context(), _frame, &finished, &_packet);
if (r >= 0 && finished) {
- last_audio = rint (
- (av_frame_get_best_effort_timestamp (_frame) * time_base + _pts_offset) * TIME_HZ
- );
+ last_audio = ContentTime::from_seconds (av_frame_get_best_effort_timestamp (_frame) * time_base + _pts_offset);
}
copy_packet.data += r;
void
FFmpegDecoder::seek_and_flush (ContentTime t)
{
- int64_t s = ((double (t) / TIME_HZ) - _pts_offset) /
- av_q2d (_format_context->streams[_video_stream]->time_base);
+ int64_t s = (t.seconds() - _pts_offset) / av_q2d (_format_context->streams[_video_stream]->time_base);
if (_ffmpeg_content->audio_stream ()) {
s = min (
- s, int64_t (
- ((double (t) / TIME_HZ) - _pts_offset) /
- av_q2d (_ffmpeg_content->audio_stream()->stream(_format_context)->time_base)
- )
+ s, int64_t ((t.seconds() - _pts_offset) / av_q2d (_ffmpeg_content->audio_stream()->stream(_format_context)->time_base))
);
}
- cout << "S&F " << t << "\n";
-
/* Ridiculous empirical hack */
s--;
if (s < 0) {
will hopefully then step through to where we want to be.
*/
- ContentTime pre_roll = accurate ? (0.2 * TIME_HZ) : 0;
+ ContentTime pre_roll = accurate ? ContentTime::from_seconds (0.2) : ContentTime (0);
ContentTime initial_seek = time - pre_roll;
- if (initial_seek < 0) {
- initial_seek = 0;
+ if (initial_seek < ContentTime (0)) {
+ initial_seek = ContentTime (0);
}
/* Initial seek time in the video stream's timebase */
}
if (frame_finished) {
- ContentTime const ct = (
+ ContentTime const ct = ContentTime::from_seconds (
av_frame_get_best_effort_timestamp (_frame) *
av_q2d (_ffmpeg_content->audio_stream()->stream (_format_context)->time_base)
+ _pts_offset
- ) * TIME_HZ;
+ );
int const data_size = av_samples_get_buffer_size (
0, audio_codec_context()->channels, _frame->nb_samples, audio_sample_format (), 1
}
if (i->second != AV_NOPTS_VALUE) {
- double const pts = i->second * av_q2d (_format_context->streams[_video_stream]->time_base) + _pts_offset;
- VideoFrame const f = rint (pts * _ffmpeg_content->video_frame_rate ());
- video (image, false, f);
+ video (image, false, ContentTime::from_seconds (i->second * av_q2d (_format_context->streams[_video_stream]->time_base) + _pts_offset));
} else {
shared_ptr<const Film> film = _film.lock ();
assert (film);
indicate that the previous subtitle should stop.
*/
if (sub.num_rects <= 0) {
- image_subtitle (shared_ptr<Image> (), dcpomatic::Rect<double> (), 0, 0);
+ image_subtitle (shared_ptr<Image> (), dcpomatic::Rect<double> (), ContentTime (), ContentTime ());
return;
} else if (sub.num_rects > 1) {
throw DecodeError (_("multi-part subtitles not yet supported"));
double const packet_time = (static_cast<double> (sub.pts) / AV_TIME_BASE) + _pts_offset;
/* hence start time for this sub */
- ContentTime const from = (packet_time + (double (sub.start_display_time) / 1e3)) * TIME_HZ;
- ContentTime const to = (packet_time + (double (sub.end_display_time) / 1e3)) * TIME_HZ;
+ ContentTime const from = ContentTime::from_seconds (packet_time + (double (sub.start_display_time) / 1e3));
+ ContentTime const to = ContentTime::from_seconds (packet_time + (double (sub.end_display_time) / 1e3));
AVSubtitleRect const * rect = sub.rects[0];
bool seek_overrun_finished (ContentTime, boost::optional<ContentTime>, boost::optional<ContentTime>) const;
bool seek_final_finished (int, int) const;
int minimal_run (boost::function<bool (boost::optional<ContentTime>, boost::optional<ContentTime>, int)>);
- void seek_and_flush (int64_t);
+ void seek_and_flush (ContentTime);
AVCodecContext* _subtitle_codec_context; ///< may be 0 if there is no subtitle
AVCodec* _subtitle_codec; ///< may be 0 if there is no subtitle
}
}
-optional<double>
+optional<ContentTime>
FFmpegExaminer::frame_time (AVStream* s) const
{
- optional<double> t;
+ optional<ContentTime> t;
int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
if (bet != AV_NOPTS_VALUE) {
- t = bet * av_q2d (s->time_base);
+ t = ContentTime::from_seconds (bet * av_q2d (s->time_base));
}
return t;
return dcp::Size (video_codec_context()->width, video_codec_context()->height);
}
-/** @return Length (in video frames) according to our content's header */
-VideoFrame
+/** @return Length according to our content's header */
+ContentTime
FFmpegExaminer::video_length () const
{
- VideoFrame const length = (double (_format_context->duration) / AV_TIME_BASE) * video_frame_rate();
- return max (1, length);
+ ContentTime const length = ContentTime::from_seconds (double (_format_context->duration) / AV_TIME_BASE);
+ return ContentTime (std::max (int64_t (1), length.get ()));
}
string
float video_frame_rate () const;
dcp::Size video_size () const;
- VideoFrame video_length () const;
+ ContentTime video_length () const;
std::vector<boost::shared_ptr<FFmpegSubtitleStream> > subtitle_streams () const {
return _subtitle_streams;
return _audio_streams;
}
- boost::optional<double> first_video () const {
+ boost::optional<ContentTime> first_video () const {
return _first_video;
}
std::string stream_name (AVStream* s) const;
std::string audio_stream_name (AVStream* s) const;
std::string subtitle_stream_name (AVStream* s) const;
- boost::optional<double> frame_time (AVStream* s) const;
+ boost::optional<ContentTime> frame_time (AVStream* s) const;
std::vector<boost::shared_ptr<FFmpegSubtitleStream> > _subtitle_streams;
std::vector<boost::shared_ptr<FFmpegAudioStream> > _audio_streams;
- boost::optional<double> _first_video;
+ boost::optional<ContentTime> _first_video;
};
return _playlist->has_subtitles ();
}
-VideoFrame
+int
Film::best_video_frame_rate () const
{
return _playlist->best_dcp_frame_rate ();
signal_changed (CONTENT);
}
-AudioFrame
-Film::time_to_audio_frames (DCPTime t) const
-{
- return divide_with_round (t * audio_frame_rate (), TIME_HZ);
-}
-
-VideoFrame
-Film::time_to_video_frames (DCPTime t) const
-{
- return divide_with_round (t * video_frame_rate (), TIME_HZ);
-}
-
-DCPTime
-Film::audio_frames_to_time (AudioFrame f) const
-{
- return divide_with_round (f * TIME_HZ, audio_frame_rate ());
-}
-
-DCPTime
-Film::video_frames_to_time (VideoFrame f) const
-{
- return divide_with_round (f * TIME_HZ, video_frame_rate ());
-}
-
-AudioFrame
+int
Film::audio_frame_rate () const
{
/* XXX */
uint64_t
Film::required_disk_space () const
{
- return uint64_t (j2k_bandwidth() / 8) * length() / TIME_HZ;
+ return uint64_t (j2k_bandwidth() / 8) * length().seconds();
}
/** This method checks the disk that the Film is on and tries to decide whether or not
boost::shared_ptr<Player> make_player () const;
boost::shared_ptr<Playlist> playlist () const;
- AudioFrame audio_frame_rate () const;
-
- AudioFrame time_to_audio_frames (DCPTime) const;
- VideoFrame time_to_video_frames (DCPTime) const;
- DCPTime video_frames_to_time (VideoFrame) const;
- DCPTime audio_frames_to_time (AudioFrame) const;
+ int audio_frame_rate () const;
uint64_t required_disk_space () const;
bool should_be_enough_disk_space (double &, double &) const;
ContentList content () const;
DCPTime length () const;
bool has_subtitles () const;
- VideoFrame best_video_frame_rate () const;
+ int best_video_frame_rate () const;
FrameRateChange active_frame_rate_change (DCPTime) const;
dcp::KDM
--- /dev/null
+/*
+ Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#include <cmath>
+#include "frame_rate_change.h"
+#include "compose.hpp"
+
+#include "i18n.h"
+
+/** @return true if the two frame rates a and b are equal to within a
+ *  tolerance of 1e-4 fps (i.e. within about one frame over a 3-hour film;
+ *  see the derivation below).
+ */
+static bool
+about_equal (float a, float b)
+{
+ /* A film of F seconds at f FPS will be Ff frames;
+ Consider some delta FPS d, so if we run the same
+ film at (f + d) FPS it will last F(f + d) seconds.
+
+ Hence the difference in length over the length of the film will
+ be F(f + d) - Ff frames
+ = Ff + Fd - Ff frames
+ = Fd frames
+ = Fd/f seconds
+
+ So if we accept a difference of 1 frame, ie 1/f seconds, we can
+ say that
+
+ 1/f = Fd/f
+ ie 1 = Fd
+ ie d = 1/F
+
+ So for a 3hr film, ie F = 3 * 60 * 60 = 10800, the acceptable
+ FPS error is 1/F ~= 0.0001 ~= 1e-4
+ */
+
+ return (fabs (a - b) < 1e-4);
+}
+
+
+/** Work out how to fit content at frame rate `source' into a DCP running at
+ *  frame rate `dcp': whether to skip or repeat content frames, and what
+ *  residual speed change remains.  A human-readable description of the
+ *  outcome is also built.
+ *  @param source Content frame rate.
+ *  @param dcp DCP frame rate.
+ */
+FrameRateChange::FrameRateChange (float source, int dcp)
+ : skip (false)
+ , repeat (1)
+ , change_speed (false)
+{
+ if (fabs (source / 2.0 - dcp) < fabs (source - dcp)) {
+ /* The difference between source and DCP frame rate will be lower
+ (i.e. better) if we skip.
+ */
+ skip = true;
+ } else if (fabs (source * 2 - dcp) < fabs (source - dcp)) {
+ /* The difference between source and DCP frame rate would be better
+ if we repeated each frame once; it may be better still if we
+ repeated more than once. Work out the required repeat.
+ */
+ repeat = round (dcp / source);
+ }
+
+ /* Whatever difference is left after skip/repeat is absorbed by running
+ the content slightly faster or slower.
+ */
+ speed_up = dcp / (source * factor());
+ change_speed = !about_equal (speed_up, 1.0);
+
+ if (!skip && repeat == 1 && !change_speed) {
+ description = _("Content and DCP have the same rate.\n");
+ } else {
+ if (skip) {
+ description = _("DCP will use every other frame of the content.\n");
+ } else if (repeat == 2) {
+ description = _("Each content frame will be doubled in the DCP.\n");
+ } else if (repeat > 2) {
+ description = String::compose (_("Each content frame will be repeated %1 more times in the DCP.\n"), repeat - 1);
+ }
+
+ if (change_speed) {
+ float const pc = dcp * 100 / (source * factor());
+ description += String::compose (_("DCP will run at %1%% of the content speed.\n"), pc);
+ }
+ }
+}
--- /dev/null
+/*
+ Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#include <string>
+
+/** A description of the conversion from some content frame rate to a
+ *  DCP frame rate, expressed as a skip/repeat of frames plus a residual
+ *  speed-up.
+ */
+struct FrameRateChange
+{
+ /** @param source Content frame rate.
+  *  @param dcp DCP frame rate.
+  */
+ FrameRateChange (float, int);
+
+ /** @return factor by which to multiply a source frame rate
+ to get the effective rate after any skip or repeat has happened.
+ */
+ float factor () const {
+ if (skip) {
+ return 0.5;
+ }
+
+ return repeat;
+ }
+
+ /** true to skip every other frame */
+ bool skip;
+ /** number of times to use each frame (e.g. 1 is normal, 2 means repeat each frame once, and so on) */
+ int repeat;
+ /** true if this DCP will run its video faster or slower than the source
+ * without taking into account `repeat' nor `skip'.
+ * (e.g. change_speed will be true if
+ * source is 29.97fps, DCP is 30fps
+ * source is 14.50fps, DCP is 30fps
+ * but not if
+ * source is 15.00fps, DCP is 30fps
+ * source is 12.50fps, DCP is 25fps)
+ */
+ bool change_speed;
+
+ /** Amount by which the video is being sped-up in the DCP; e.g. for a
+ * 24fps source in a 25fps DCP this would be 25/24.
+ */
+ float speed_up;
+
+ /** Human-readable description of this frame rate change. */
+ std::string description;
+};
}
void
-ImageContent::set_video_length (VideoFrame len)
+ImageContent::set_video_length (ContentTime len)
{
{
boost::mutex::scoped_lock lm (_mutex);
{
shared_ptr<const Film> film = _film.lock ();
assert (film);
-
- FrameRateChange frc (video_frame_rate(), film->video_frame_rate ());
- return video_length() * frc.factor() * TIME_HZ / video_frame_rate();
+ return DCPTime (video_length(), FrameRateChange (video_frame_rate(), film->video_frame_rate()));
}
string
std::string identifier () const;
- void set_video_length (VideoFrame);
+ void set_video_length (ContentTime);
bool still () const;
void set_video_frame_rate (float);
};
: Decoder (f)
, VideoDecoder (f, c)
, _image_content (c)
- , _video_position (0)
{
}
if (_image && _image_content->still ()) {
video (_image, true, _video_position);
- ++_video_position;
+ _video_position += ContentTime::from_frames (1, _image_content->video_frame_rate ());
return false;
}
Magick::Image* magick_image = 0;
- boost::filesystem::path const path = _image_content->path (_image_content->still() ? 0 : _video_position);
+
+ boost::filesystem::path const path = _image_content->path (
+ _image_content->still() ? 0 : _video_position.frames (_image_content->video_frame_rate ())
+ );
+
try {
magick_image = new Magick::Image (path.string ());
} catch (...) {
delete magick_image;
video (_image, false, _video_position);
- ++_video_position;
+ _video_position += ContentTime::from_frames (1, _image_content->video_frame_rate ());
return false;
}
ImageDecoder::seek (ContentTime time, bool accurate)
{
Decoder::seek (time, accurate);
-
- _video_position = rint (time * _video_content->video_frame_rate() / TIME_HZ);
+ _video_position = time;
}
boost::shared_ptr<const ImageContent> _image_content;
boost::shared_ptr<Image> _image;
- VideoFrame _video_position;
+ ContentTime _video_position;
};
ImageExaminer::ImageExaminer (shared_ptr<const Film> film, shared_ptr<const ImageContent> content, shared_ptr<Job>)
: _film (film)
, _image_content (content)
- , _video_length (0)
{
using namespace MagickCore;
Magick::Image* image = new Magick::Image (content->path(0).string());
delete image;
+ /* NOTE(review): both branches below hand ContentTime a value expressed in
+    seconds; confirm that the ContentTime(int) and ContentTime(double)
+    constructors interpret their argument as seconds rather than as a raw
+    internal time count.
+ */
if (content->still ()) {
- _video_length = Config::instance()->default_still_length() * video_frame_rate();
+ _video_length = ContentTime (Config::instance()->default_still_length());
} else {
- _video_length = _image_content->number_of_paths ();
+ _video_length = ContentTime (double (_image_content->number_of_paths ()) / video_frame_rate ());
}
}
float video_frame_rate () const;
dcp::Size video_size () const;
- VideoFrame video_length () const {
+ ContentTime video_length () const {
return _video_length;
}
boost::weak_ptr<const Film> _film;
boost::shared_ptr<const ImageContent> _image_content;
boost::optional<dcp::Size> _video_size;
- VideoFrame _video_length;
+ ContentTime _video_length;
};
#include "player.h"
#include "film.h"
#include "ffmpeg_decoder.h"
+#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "image_decoder.h"
#include "image_content.h"
, _have_valid_pieces (false)
, _video_position (0)
, _audio_position (0)
- , _audio_merger (f->audio_channels(), bind (&Film::time_to_audio_frames, f.get(), _1), bind (&Film::audio_frames_to_time, f.get(), _1))
+ , _audio_merger (f->audio_channels(), f->audio_frame_rate ())
, _last_emit_was_black (false)
, _just_did_inaccurate_seek (false)
, _approximate_size (false)
shared_ptr<Piece> earliest_piece;
shared_ptr<Decoded> earliest_decoded;
- DCPTime earliest_time = TIME_MAX;
- DCPTime earliest_audio = TIME_MAX;
+ DCPTime earliest_time = DCPTime::max ();
+ DCPTime earliest_audio = DCPTime::max ();
for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
}
- dec->set_dcp_times (_film->video_frame_rate(), _film->audio_frame_rate(), (*i)->frc, offset);
+ dec->set_dcp_times ((*i)->frc, offset);
DCPTime const t = dec->dcp_time - offset;
cout << "Peeked " << (*i)->content->paths()[0] << " for " << t << " cf " << ((*i)->content->full_length() - (*i)->content->trim_end ()) << "\n";
if (t >= ((*i)->content->full_length() - (*i)->content->trim_end ())) {
return true;
}
- if (earliest_audio != TIME_MAX) {
- TimedAudioBuffers<DCPTime> tb = _audio_merger.pull (max (int64_t (0), earliest_audio));
+ if (earliest_audio != DCPTime::max ()) {
+ if (earliest_audio.get() < 0) {
+ earliest_audio = DCPTime ();
+ }
+ TimedAudioBuffers<DCPTime> tb = _audio_merger.pull (earliest_audio);
Audio (tb.audio, tb.time);
- /* This assumes that the audio_frames_to_time conversion is exact
+ /* This assumes that the audio-frames-to-time conversion is exact
so that there are no accumulated errors caused by rounding.
*/
- _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
+ _audio_position += DCPTime::from_frames (tb.audio->frames(), _film->audio_frame_rate ());
}
/* Emit the earliest thing */
shared_ptr<DecodedImageSubtitle> dis = dynamic_pointer_cast<DecodedImageSubtitle> (earliest_decoded);
shared_ptr<DecodedTextSubtitle> dts = dynamic_pointer_cast<DecodedTextSubtitle> (earliest_decoded);
- if (dv) {
- cout << "Video @ " << dv->dcp_time << " " << (double(dv->dcp_time) / TIME_HZ) << ".\n";
- } else if (da) {
- cout << "Audio.\n";
- } else if (dis) {
- cout << "Image sub.\n";
- } else if (dts) {
- cout << "Text sub.\n";
- }
-
/* Will be set to false if we shouldn't consume the peeked DecodedThing */
bool consume = true;
{
/* This is a bit of a hack; don't update _video_position if EYES_RIGHT is on its way */
if (video->eyes != EYES_LEFT) {
- /* This assumes that the video_frames_to_time conversion is exact
+ /* This assumes that the video-frames-to-time conversion is exact
so that there are no accumulated errors caused by rounding.
*/
- _video_position += _film->video_frames_to_time (1);
+ _video_position += DCPTime::from_frames (1, _film->video_frame_rate ());
}
}
audio->data = dcp_mapped;
/* Delay */
- audio->dcp_time += content->audio_delay() * TIME_HZ / 1000;
- if (audio->dcp_time < 0) {
- int const frames = - audio->dcp_time * _film->audio_frame_rate() / TIME_HZ;
+ audio->dcp_time += DCPTime::from_seconds (content->audio_delay() / 1000.0);
+ if (audio->dcp_time < DCPTime (0)) {
+ int const frames = - audio->dcp_time.frames (_film->audio_frame_rate());
if (frames >= audio->data->frames ()) {
return;
}
trimmed->copy_from (audio->data.get(), audio->data->frames() - frames, frames, 0);
audio->data = trimmed;
- audio->dcp_time = 0;
+ audio->dcp_time = DCPTime ();
}
_audio_merger.push (audio->data, audio->dcp_time);
TimedAudioBuffers<DCPTime> tb = _audio_merger.flush ();
if (_audio && tb.audio) {
Audio (tb.audio, tb.time);
- _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
+ _audio_position += DCPTime::from_frames (tb.audio->frames (), _film->audio_frame_rate ());
}
while (_video && _video_position < _audio_position) {
s = min ((*i)->content->length_after_trim(), s);
/* Convert this to the content time */
- ContentTime ct = (s + (*i)->content->trim_start()) * (*i)->frc.speed_up;
+ ContentTime ct (s + (*i)->content->trim_start(), (*i)->frc);
/* And seek the decoder */
- cout << "seek " << (*i)->content->paths()[0] << " to " << ct << "\n";
(*i)->decoder->seek (ct, accurate);
}
- _video_position = time_round_up (t, TIME_HZ / _film->video_frame_rate());
- _audio_position = time_round_up (t, TIME_HZ / _film->audio_frame_rate());
+ _video_position = t.round_up (_film->video_frame_rate());
+ _audio_position = t.round_up (_film->audio_frame_rate());
_audio_merger.clear (_audio_position);
optional<FrameRateChange> frc;
/* Work out a FrameRateChange for the best overlap video for this content, in case we need it below */
- DCPTime best_overlap_t = 0;
+ DCPTime best_overlap_t;
shared_ptr<VideoContent> best_overlap;
for (ContentList::iterator j = content.begin(); j != content.end(); ++j) {
shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> (*j);
frc = best_overlap_frc;
}
- ContentTime st = (*i)->trim_start() * frc->speed_up;
+ ContentTime st ((*i)->trim_start(), frc.get ());
decoder->seek (st, true);
_pieces.push_back (shared_ptr<Piece> (new Piece (*i, decoder, frc.get ())));
/* The Piece for the _last_incoming_video will no longer be valid */
_last_incoming_video.video.reset ();
- _video_position = _audio_position = 0;
+ _video_position = DCPTime ();
+ _audio_position = DCPTime ();
}
void
#endif
Video (_black_frame, EYES_BOTH, ColourConversion(), _last_emit_was_black, _video_position);
- _video_position += _film->video_frames_to_time (1);
+ _video_position += DCPTime::from_frames (1, _film->video_frame_rate ());
_last_emit_was_black = true;
}
return;
}
- DCPTime t = min (most, TIME_HZ / 2);
- shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), t * _film->audio_frame_rate() / TIME_HZ));
+ DCPTime t = min (most, DCPTime::from_seconds (0.5));
+ shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), t.frames (_film->audio_frame_rate())));
silence->make_silent ();
Audio (silence, _audio_position);
void do_seek (DCPTime, bool);
void flush ();
void emit_black ();
- void emit_silence (AudioFrame);
+ void emit_silence (DCPTime);
void film_changed (Film::Property);
void update_subtitle_from_image ();
void update_subtitle_from_text ();
/** The time after the last audio that we emitted */
DCPTime _audio_position;
- AudioMerger<DCPTime, AudioFrame> _audio_merger;
+ AudioMerger _audio_merger;
dcp::Size _video_container_size;
boost::shared_ptr<PlayerImage> _black_frame;
_sequencing_video = true;
ContentList cl = _content;
- DCPTime next = 0;
+ DCPTime next;
for (ContentList::iterator i = _content.begin(); i != _content.end(); ++i) {
if (!dynamic_pointer_cast<VideoContent> (*i)) {
continue;
}
(*i)->set_position (next);
- next = (*i)->end() + 1;
+ next = (*i)->end() + DCPTime::delta ();
}
/* This won't change order, so it does not need a sort */
DCPTime
Playlist::length () const
{
- DCPTime len = 0;
+ DCPTime len;
for (ContentList::const_iterator i = _content.begin(); i != _content.end(); ++i) {
- len = max (len, (*i)->end() + 1);
+ len = max (len, (*i)->end() + DCPTime::delta ());
}
return len;
DCPTime
Playlist::video_end () const
{
- DCPTime end = 0;
+ DCPTime end;
for (ContentList::const_iterator i = _content.begin(); i != _content.end(); ++i) {
if (dynamic_pointer_cast<const VideoContent> (*i)) {
end = max (end, (*i)->end ());
void
Playlist::repeat (ContentList c, int n)
{
- pair<DCPTime, DCPTime> range (TIME_MAX, 0);
+ pair<DCPTime, DCPTime> range (DCPTime::max (), DCPTime ());
for (ContentList::iterator i = c.begin(); i != c.end(); ++i) {
range.first = min (range.first, (*i)->position ());
range.second = max (range.second, (*i)->position ());
, _audio_mapping (node->node_child ("AudioMapping"), version)
{
_audio_channels = node->number_child<int> ("AudioChannels");
- _audio_length = node->number_child<AudioFrame> ("AudioLength");
+ _audio_length = ContentTime (node->number_child<int64_t> ("AudioLength"));
_audio_frame_rate = node->number_child<int> ("AudioFrameRate");
}
{
shared_ptr<const Film> film = _film.lock ();
assert (film);
-
- AudioFrame const len = divide_with_round (audio_length() * output_audio_frame_rate(), content_audio_frame_rate ());
-
- /* XXX: this depends on whether, alongside this audio, we are running video slower or faster than
- it should be. The calculation above works out the output audio frames assuming that we are just
- resampling the audio: it would be incomplete if, for example, we were running this audio alongside
- 25fps video that was being run at 24fps.
- */
-
- return film->audio_frames_to_time (len);
+ return DCPTime (audio_length(), film->active_frame_rate_change (position ()));
}
int
return _audio_channels;
}
- AudioFrame audio_length () const {
+ ContentTime audio_length () const {
boost::mutex::scoped_lock lm (_mutex);
return _audio_length;
}
private:
int _audio_channels;
- AudioFrame _audio_length;
+ ContentTime _audio_length;
int _audio_frame_rate;
AudioMapping _audio_mapping;
};
}
data->set_frames (this_time);
- audio (data, _done * TIME_HZ / audio_frame_rate ());
+ audio (data, ContentTime::from_frames (_done, audio_frame_rate ()));
_done += this_time;
_remaining -= this_time;
return _info.channels;
}
-AudioFrame
+/** @return Length of this sound file's audio (_info.frames frames at the
+ *  file's native rate), expressed as a ContentTime.
+ */
+ContentTime
SndfileDecoder::audio_length () const
{
- return _info.frames;
+ return ContentTime::from_frames (_info.frames, audio_frame_rate ());
}
int
Decoder::seek (t, accurate);
AudioDecoder::seek (t, accurate);
- _done = t * audio_frame_rate() / TIME_HZ;
+ _done = t.frames (audio_frame_rate ());
_remaining = _info.frames - _done;
}
void seek (ContentTime, bool);
int audio_channels () const;
- AudioFrame audio_length () const;
+ ContentTime audio_length () const;
int audio_frame_rate () const;
private:
boost::shared_ptr<const SndfileContent> _sndfile_content;
SNDFILE* _sndfile;
SF_INFO _info;
- AudioFrame _done;
- AudioFrame _remaining;
+ int64_t _done;
+ int64_t _remaining;
float* _deinterleave_buffer;
};
ContentTime
+/** Convert a SubRip timecode of the form H:M:S,ms (e.g. 01:02:03,456)
+ *  to a ContentTime.  The comma separates whole seconds from milliseconds.
+ *  @param t Timecode string.
+ *  @return Corresponding time.
+ */
SubRip::convert_time (string t)
{
- ContentTime r = 0;
+ ContentTime r;
vector<string> a;
boost::algorithm::split (a, t, boost::is_any_of (":"));
assert (a.size() == 3);
+ /* a[0] = hours, a[1] = minutes, a[2] = "seconds,milliseconds" */
- r += lexical_cast<int> (a[0]) * 60 * 60 * TIME_HZ;
- r += lexical_cast<int> (a[1]) * 60 * TIME_HZ;
+ r += ContentTime::from_seconds (lexical_cast<int> (a[0]) * 60 * 60);
+ r += ContentTime::from_seconds (lexical_cast<int> (a[1]) * 60);
vector<string> b;
boost::algorithm::split (b, a[2], boost::is_any_of (","));
- r += lexical_cast<int> (b[0]) * TIME_HZ;
- r += lexical_cast<int> (b[1]) * TIME_HZ / 1000;
+ r += ContentTime::from_seconds (lexical_cast<int> (b[0]));
+ r += ContentTime::from_seconds (lexical_cast<float> (b[1]) / 1000);
return r;
}
SubRip::length () const
{
if (_subtitles.empty ()) {
- return 0;
+ return ContentTime ();
}
return _subtitles.back().to;
#include "subrip_content.h"
#include "util.h"
#include "subrip.h"
+#include "film.h"
#include "i18n.h"
SubRipContent::SubRipContent (shared_ptr<const Film> film, shared_ptr<const cxml::Node> node, int version)
: Content (film, node)
, SubtitleContent (film, node, version)
- , _length (node->number_child<DCPTime> ("Length"))
+ , _length (node->number_child<int64_t> ("Length"))
{
}
Content::examine (job);
SubRip s (shared_from_this ());
boost::mutex::scoped_lock lm (_mutex);
- _length = s.length ();
+ shared_ptr<const Film> film = _film.lock ();
+ _length = DCPTime (s.length (), film->active_frame_rate_change (position ()));
}
string
i->italic,
dcp::Color (255, 255, 255),
72,
- _subtitles[_next].from,
- _subtitles[_next].to,
+ dcp::Time (rint (_subtitles[_next].from.seconds() * 250)),
+ dcp::Time (rint (_subtitles[_next].to.seconds() * 250)),
0.9,
dcp::BOTTOM,
i->text,
#include <boost/optional.hpp>
#include <dcp/types.h>
#include "types.h"
+#include "dcpomatic_time.h"
struct SubRipSubtitlePiece
{
void
+/** Called by subclasses when an image subtitle is ready; it is added to
+ *  the pending queue along with its `from' and `to' times.
+ */
SubtitleDecoder::image_subtitle (shared_ptr<Image> image, dcpomatic::Rect<double> rect, ContentTime from, ContentTime to)
{
- _pending.push_back (shared_ptr<DecodedImageSubtitle> (new DecodedImageSubtitle (image, rect, from, to)));
+ _pending.push_back (shared_ptr<DecodedImageSubtitle> (new DecodedImageSubtitle (from, to, image, rect)));
}
void
return s.str ();
}
+/** @return Approximate remaining time in seconds */
int
TranscodeJob::remaining_time () const
{
}
/* Compute approximate proposed length here, as it's only here that we need it */
- VideoFrame const left = _film->time_to_video_frames (_film->length ()) - t->video_frames_out();
- return left / fps;
+ return (_film->length().frames (_film->video_frame_rate ()) - t->video_frames_out()) / fps;
}
*/
#define SERVER_LINK_VERSION 1
-typedef int64_t DCPTime;
-#define TIME_MAX INT64_MAX
-#define TIME_HZ ((DCPTime) 96000)
-typedef int64_t ContentTime;
-typedef int64_t AudioFrame;
-typedef int VideoFrame;
typedef std::vector<boost::shared_ptr<Content> > ContentList;
typedef std::vector<boost::shared_ptr<VideoContent> > VideoContentList;
typedef std::vector<boost::shared_ptr<AudioContent> > AudioContentList;
return s.str ();
}
-static bool
-about_equal (float a, float b)
-{
- /* A film of F seconds at f FPS will be Ff frames;
- Consider some delta FPS d, so if we run the same
- film at (f + d) FPS it will last F(f + d) seconds.
-
- Hence the difference in length over the length of the film will
- be F(f + d) - Ff frames
- = Ff + Fd - Ff frames
- = Fd frames
- = Fd/f seconds
-
- So if we accept a difference of 1 frame, ie 1/f seconds, we can
- say that
-
- 1/f = Fd/f
- ie 1 = Fd
- ie d = 1/F
-
- So for a 3hr film, ie F = 3 * 60 * 60 = 10800, the acceptable
- FPS error is 1/F ~= 0.0001 ~= 10-e4
- */
-
- return (fabs (a - b) < 1e-4);
-}
-
/** @param An arbitrary audio frame rate.
* @return The appropriate DCP-approved frame rate (48kHz or 96kHz).
*/
assert (boost::this_thread::get_id() == ui_thread);
}
-/** @param v Content video frame.
- * @param audio_sample_rate Source audio sample rate.
- * @param frames_per_second Number of video frames per second.
- * @return Equivalent number of audio frames for `v'.
- */
-int64_t
-video_frames_to_audio_frames (VideoFrame v, float audio_sample_rate, float frames_per_second)
-{
- return ((int64_t) v * audio_sample_rate / frames_per_second);
-}
-
string
audio_channel_name (int c)
{
return channels[c];
}
-FrameRateChange::FrameRateChange (float source, int dcp)
- : skip (false)
- , repeat (1)
- , change_speed (false)
-{
- if (fabs (source / 2.0 - dcp) < fabs (source - dcp)) {
- /* The difference between source and DCP frame rate will be lower
- (i.e. better) if we skip.
- */
- skip = true;
- } else if (fabs (source * 2 - dcp) < fabs (source - dcp)) {
- /* The difference between source and DCP frame rate would be better
- if we repeated each frame once; it may be better still if we
- repeated more than once. Work out the required repeat.
- */
- repeat = round (dcp / source);
- }
-
- speed_up = dcp / (source * factor());
- change_speed = !about_equal (speed_up, 1.0);
-
- if (!skip && repeat == 1 && !change_speed) {
- description = _("Content and DCP have the same rate.\n");
- } else {
- if (skip) {
- description = _("DCP will use every other frame of the content.\n");
- } else if (repeat == 2) {
- description = _("Each content frame will be doubled in the DCP.\n");
- } else if (repeat > 2) {
- description = String::compose (_("Each content frame will be repeated %1 more times in the DCP.\n"), repeat - 1);
- }
-
- if (change_speed) {
- float const pc = dcp * 100 / (source * factor());
- description += String::compose (_("DCP will run at %1%% of the content speed.\n"), pc);
- }
- }
-}
-
LocaleGuard::LocaleGuard ()
: _old (0)
{
return dcp::Size (full_frame.width, rint (full_frame.width / ratio));
}
-DCPTime
-time_round_up (DCPTime t, DCPTime nearest)
-{
- DCPTime const a = t + nearest - 1;
- return a - (a % nearest);
-}
-
void *
wrapped_av_malloc (size_t s)
{
extern dcp::Size fit_ratio_within (float ratio, dcp::Size);
extern std::string entities_to_text (std::string e);
extern std::map<std::string, std::string> split_get_request (std::string url);
-
-struct FrameRateChange
-{
- FrameRateChange (float, int);
-
- /** @return factor by which to multiply a source frame rate
- to get the effective rate after any skip or repeat has happened.
- */
- float factor () const {
- if (skip) {
- return 0.5;
- }
-
- return repeat;
- }
-
- /** true to skip every other frame */
- bool skip;
- /** number of times to use each frame (e.g. 1 is normal, 2 means repeat each frame once, and so on) */
- int repeat;
- /** true if this DCP will run its video faster or slower than the source
- * without taking into account `repeat' nor `skip'.
- * (e.g. change_speed will be true if
- * source is 29.97fps, DCP is 30fps
- * source is 14.50fps, DCP is 30fps
- * but not if
- * source is 15.00fps, DCP is 30fps
- * source is 12.50fps, DCP is 25fps)
- */
- bool change_speed;
-
- /** Amount by which the video is being sped-up in the DCP; e.g. for a
- * 24fps source in a 25fps DCP this would be 25/24.
- */
- float speed_up;
-
- std::string description;
-};
-
extern int dcp_audio_frame_rate (int);
extern int stride_round_up (int, int const *, int);
-extern DCPTime time_round_up (DCPTime, DCPTime);
extern std::multimap<std::string, std::string> read_key_value (std::istream& s);
extern int get_required_int (std::multimap<std::string, std::string> const & kv, std::string k);
extern float get_required_float (std::multimap<std::string, std::string> const & kv, std::string k);
int _timeout;
};
-extern int64_t video_frames_to_audio_frames (VideoFrame v, float audio_sample_rate, float frames_per_second);
-
class LocaleGuard
{
public:
setup_default_colour_conversion ();
}
-VideoContent::VideoContent (shared_ptr<const Film> f, DCPTime s, VideoFrame len)
+VideoContent::VideoContent (shared_ptr<const Film> f, DCPTime s, ContentTime len)
: Content (f, s)
, _video_length (len)
, _video_frame_rate (0)
: Content (f, node)
, _ratio (0)
{
- _video_length = node->number_child<VideoFrame> ("VideoLength");
+ _video_length = ContentTime (node->number_child<int64_t> ("VideoLength"));
_video_size.width = node->number_child<int> ("VideoWidth");
_video_size.height = node->number_child<int> ("VideoHeight");
_video_frame_rate = node->number_child<float> ("VideoFrameRate");
}
/** @param t A time offset from the start of this piece of content.
- * @return Corresponding frame index, rounded up so that the frame index
- * is that of the next complete frame which starts after `t'.
+ * @return Corresponding time with respect to the content.
 */
-VideoFrame
-VideoContent::time_to_content_video_frames (DCPTime t) const
+ContentTime
+VideoContent::dcp_time_to_content_time (DCPTime t) const
{
shared_ptr<const Film> film = _film.lock ();
assert (film);
-
- /* Here we are converting from time (in the DCP) to a frame number in the content.
- Hence we need to use the DCP's frame rate and the double/skip correction, not
- the source's rate; source rate will be equal to DCP rate if we ignore
- double/skip. There's no need to call Film::active_frame_rate_change() here
- as we know that we are it (since we're video).
- */
- FrameRateChange frc (video_frame_rate(), film->video_frame_rate());
- return ceil (t * film->video_frame_rate() / (frc.factor() * TIME_HZ));
+ /* There is no need to call Film::active_frame_rate_change() here: we
+    are video content, so we know that our own rate is the controlling one.
+ */
+ return ContentTime (t, FrameRateChange (video_frame_rate(), film->video_frame_rate()));
}
typedef int Frame;
VideoContent (boost::shared_ptr<const Film>);
- VideoContent (boost::shared_ptr<const Film>, DCPTime, VideoFrame);
+ VideoContent (boost::shared_ptr<const Film>, DCPTime, ContentTime);
VideoContent (boost::shared_ptr<const Film>, boost::filesystem::path);
VideoContent (boost::shared_ptr<const Film>, boost::shared_ptr<const cxml::Node>);
VideoContent (boost::shared_ptr<const Film>, std::vector<boost::shared_ptr<Content> >);
virtual std::string information () const;
virtual std::string identifier () const;
- VideoFrame video_length () const {
+ ContentTime video_length () const {
boost::mutex::scoped_lock lm (_mutex);
return _video_length;
}
dcp::Size video_size_after_3d_split () const;
dcp::Size video_size_after_crop () const;
- VideoFrame time_to_content_video_frames (DCPTime) const;
+ ContentTime dcp_time_to_content_time (DCPTime) const;
protected:
void take_from_video_examiner (boost::shared_ptr<VideoExaminer>);
- VideoFrame _video_length;
+ ContentTime _video_length;
float _video_frame_rate;
private:
/** Called by subclasses when they have a video frame ready */
void
-VideoDecoder::video (shared_ptr<const Image> image, bool same, VideoFrame frame)
+VideoDecoder::video (shared_ptr<const Image> image, bool same, ContentTime time)
{
switch (_video_content->video_frame_type ()) {
case VIDEO_FRAME_TYPE_2D:
- _pending.push_back (shared_ptr<DecodedVideo> (new DecodedVideo (image, EYES_BOTH, same, frame)));
+ _pending.push_back (shared_ptr<DecodedVideo> (new DecodedVideo (time, image, EYES_BOTH, same)));
break;
case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
{
int const half = image->size().width / 2;
- _pending.push_back (shared_ptr<DecodedVideo> (new DecodedVideo (image->crop (Crop (0, half, 0, 0), true), EYES_LEFT, same, frame)));
- _pending.push_back (shared_ptr<DecodedVideo> (new DecodedVideo (image->crop (Crop (half, 0, 0, 0), true), EYES_RIGHT, same, frame)));
+ _pending.push_back (shared_ptr<DecodedVideo> (new DecodedVideo (time, image->crop (Crop (0, half, 0, 0), true), EYES_LEFT, same)));
+ _pending.push_back (shared_ptr<DecodedVideo> (new DecodedVideo (time, image->crop (Crop (half, 0, 0, 0), true), EYES_RIGHT, same)));
break;
}
case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
{
int const half = image->size().height / 2;
- _pending.push_back (shared_ptr<DecodedVideo> (new DecodedVideo (image->crop (Crop (0, 0, 0, half), true), EYES_LEFT, same, frame)));
- _pending.push_back (shared_ptr<DecodedVideo> (new DecodedVideo (image->crop (Crop (0, 0, half, 0), true), EYES_RIGHT, same, frame)));
+ _pending.push_back (shared_ptr<DecodedVideo> (new DecodedVideo (time, image->crop (Crop (0, 0, 0, half), true), EYES_LEFT, same)));
+ _pending.push_back (shared_ptr<DecodedVideo> (new DecodedVideo (time, image->crop (Crop (0, 0, half, 0), true), EYES_RIGHT, same)));
break;
}
default:
protected:
- void video (boost::shared_ptr<const Image>, bool, VideoFrame);
+ void video (boost::shared_ptr<const Image>, bool, ContentTime);
boost::shared_ptr<const VideoContent> _video_content;
};
virtual ~VideoExaminer () {}
virtual float video_frame_rate () const = 0;
virtual dcp::Size video_size () const = 0;
- virtual VideoFrame video_length () const = 0;
+ virtual ContentTime video_length () const = 0;
};
#include "config.h"
#include "job.h"
#include "cross.h"
+#include "audio_buffers.h"
#include "i18n.h"
if (_film->length()) {
shared_ptr<Job> job = _job.lock ();
assert (job);
- int total = _film->time_to_video_frames (_film->length ());
+ int64_t total = _film->length().frames (_film->video_frame_rate ());
if (_film->three_d ()) {
/* _full_written and so on are incremented for each eye, so we need to double the total
frames to get the correct progress.
audio_content.cc
audio_decoder.cc
audio_mapping.cc
+ audio_merger.cc
cinema.cc
colour_conversion.cc
config.cc
dci_metadata.cc
dcp_content_type.cc
dcp_video_frame.cc
+ dcpomatic_time.cc
decoder.cc
dolby_cp750.cc
encoder.cc
ffmpeg_examiner.cc
film.cc
filter.cc
+ frame_rate_change.cc
image.cc
image_content.cc
image_decoder.cc
_timeline_dialog = 0;
}
- _timeline_dialog = new DCPTimelineDialog (this, _film);
+ _timeline_dialog = new TimelineDialog (this, _film);
_timeline_dialog->Show ();
}
class wxListCtrl;
class wxListEvent;
class Film;
-class DCPTimelineDialog;
+class TimelineDialog;
class Ratio;
-class DCPTimecode;
+class Timecode;
class FilmEditorPanel;
class SubtitleContent;
std::vector<Ratio const *> _ratios;
bool _generally_sensitive;
- DCPTimelineDialog* _timeline_dialog;
+ TimelineDialog* _timeline_dialog;
};
_frame.reset ();
_slider->SetValue (0);
- set_position_text (0);
+ set_position_text (DCPTime ());
if (!_film) {
return;
{
if (_film && _player) {
try {
- _player->seek (_slider->GetValue() * _film->length() / 4096, false);
+ _player->seek (DCPTime (_film->length().get() * _slider->GetValue() / 4096), false);
fetch_next_frame ();
} catch (OpenFileError& e) {
/* There was a problem opening a content file; we'll let this slide as it
double const fps = _film->video_frame_rate ();
/* Count frame number from 1 ... not sure if this is the best idea */
- _frame_number->SetLabel (wxString::Format (wxT("%d"), int (rint (t * fps / TIME_HZ)) + 1));
+ _frame_number->SetLabel (wxString::Format (wxT("%d"), int (rint (t.seconds() * fps)) + 1));
- double w = static_cast<double>(t) / TIME_HZ;
+ double w = t.seconds ();
int const h = (w / 3600);
w -= h * 3600;
int const m = (w / 60);
We want to see the one before it, so we need to go back 2.
*/
- DCPTime p = _player->video_position() - _film->video_frames_to_time (2);
+ DCPTime p = _player->video_position() - DCPTime::from_frames (2, _film->video_frame_rate ());
if (p < 0) {
- p = 0;
+ p = DCPTime ();
}
try {
_encoded->Finished.connect (boost::bind (&PropertiesDialog::layout, this));
_table->Add (_encoded, 1, wxALIGN_CENTER_VERTICAL);
- _frames->SetLabel (std_to_wx (lexical_cast<string> (_film->time_to_video_frames (_film->length()))));
+ _frames->SetLabel (std_to_wx (lexical_cast<string> (_film->length().frames (_film->video_frame_rate ()))));
double const disk = double (_film->required_disk_space()) / 1073741824.0f;
stringstream s;
s << fixed << setprecision (1) << disk << wx_to_std (_("Gb"));
if (_film->length()) {
/* XXX: encoded_frames() should check which frames have been encoded */
- u << " (" << (_film->encoded_frames() * 100 / _film->time_to_video_frames (_film->length())) << "%)";
+ u << " (" << (_film->encoded_frames() * 100 / _film->length().frames (_film->video_frame_rate ())) << "%)";
}
return u.str ();
}
void
Timecode::set (DCPTime t, int fps)
{
- int const h = t / (3600 * TIME_HZ);
- t -= h * 3600 * TIME_HZ;
- int const m = t / (60 * TIME_HZ);
- t -= m * 60 * TIME_HZ;
- int const s = t / TIME_HZ;
- t -= s * TIME_HZ;
- int const f = divide_with_round (t * fps, TIME_HZ);
+ int const h = t.seconds() / 3600;
+ t -= DCPTime::from_seconds (h * 3600);
+ int const m = t.seconds() / 60;
+ t -= DCPTime::from_seconds (m * 60);
+ int const s = t.seconds();
+ t -= DCPTime::from_seconds (s);
+ int const f = rint (t.seconds() * fps);
checked_set (_hours, lexical_cast<string> (h));
checked_set (_minutes, lexical_cast<string> (m));
DCPTime
Timecode::get (int fps) const
{
- DCPTime t = 0;
+ DCPTime t;
string const h = wx_to_std (_hours->GetValue ());
- t += lexical_cast<int> (h.empty() ? "0" : h) * 3600 * TIME_HZ;
+ t += DCPTime::from_seconds (lexical_cast<int> (h.empty() ? "0" : h) * 3600);
string const m = wx_to_std (_minutes->GetValue());
- t += lexical_cast<int> (m.empty() ? "0" : m) * 60 * TIME_HZ;
+ t += DCPTime::from_seconds (lexical_cast<int> (m.empty() ? "0" : m) * 60);
string const s = wx_to_std (_seconds->GetValue());
- t += lexical_cast<int> (s.empty() ? "0" : s) * TIME_HZ;
+ t += DCPTime::from_seconds (lexical_cast<int> (s.empty() ? "0" : s));
string const f = wx_to_std (_frames->GetValue());
- t += lexical_cast<int> (f.empty() ? "0" : f) * TIME_HZ / fps;
+ t += DCPTime::from_seconds (lexical_cast<double> (f.empty() ? "0" : f) / fps);
return t;
}
class View : public boost::noncopyable
{
public:
- View (DCPTimeline& t)
+ View (Timeline& t)
: _timeline (t)
{
protected:
virtual void do_paint (wxGraphicsContext *) = 0;
- int time_x (DCPTime t) const
+ int time_x (double t) const
{
- return _timeline.tracks_position().x + t * _timeline.pixels_per_time_unit();
+ return _timeline.tracks_position().x + t * _timeline.pixels_per_second ();
}
- DCPTimeline& _timeline;
+ Timeline& _timeline;
private:
dcpomatic::Rect<int> _last_paint_bbox;
class ContentView : public View
{
public:
- ContentView (DCPTimeline& tl, shared_ptr<Content> c)
+ ContentView (Timeline& tl, shared_ptr<Content> c)
: View (tl)
, _content (c)
, _track (0)
return dcpomatic::Rect<int> (
time_x (content->position ()) - 8,
y_pos (_track) - 8,
- content->length_after_trim () * _timeline.pixels_per_time_unit() + 16,
+ content->length_after_trim().seconds() * _timeline.pixels_per_second() + 16,
_timeline.track_height() + 16
);
}
wxDouble name_leading;
gc->GetTextExtent (name, &name_width, &name_height, &name_descent, &name_leading);
- gc->Clip (wxRegion (time_x (position), y_pos (_track), len * _timeline.pixels_per_time_unit(), _timeline.track_height()));
+ gc->Clip (wxRegion (time_x (position), y_pos (_track), len.seconds() * _timeline.pixels_per_second(), _timeline.track_height()));
gc->DrawText (name, time_x (position) + 12, y_pos (_track + 1) - name_height - 4);
gc->ResetClip ();
}
}
if (!frequent) {
- _timeline.setup_pixels_per_time_unit ();
+ _timeline.setup_pixels_per_second ();
_timeline.Refresh ();
}
}
class AudioContentView : public ContentView
{
public:
- AudioContentView (DCPTimeline& tl, shared_ptr<Content> c)
+ AudioContentView (Timeline& tl, shared_ptr<Content> c)
: ContentView (tl, c)
{}
class VideoContentView : public ContentView
{
public:
- VideoContentView (DCPTimeline& tl, shared_ptr<Content> c)
+ VideoContentView (Timeline& tl, shared_ptr<Content> c)
: ContentView (tl, c)
{}
}
};
-class DCPTimeAxisView : public View
+class TimeAxisView : public View
{
public:
- DCPTimeAxisView (DCPTimeline& tl, int y)
+ TimeAxisView (Timeline& tl, int y)
: View (tl)
, _y (y)
{}
{
gc->SetPen (*wxThePenList->FindOrCreatePen (wxColour (0, 0, 0), 1, wxPENSTYLE_SOLID));
- int mark_interval = rint (128 / (TIME_HZ * _timeline.pixels_per_time_unit ()));
+ double mark_interval = rint (128 / _timeline.pixels_per_second ());
if (mark_interval > 5) {
- mark_interval -= mark_interval % 5;
+ mark_interval -= int (rint (mark_interval)) % 5;
}
if (mark_interval > 10) {
- mark_interval -= mark_interval % 10;
+ mark_interval -= int (rint (mark_interval)) % 10;
}
if (mark_interval > 60) {
- mark_interval -= mark_interval % 60;
+ mark_interval -= int (rint (mark_interval)) % 60;
}
if (mark_interval > 3600) {
- mark_interval -= mark_interval % 3600;
+ mark_interval -= int (rint (mark_interval)) % 3600;
}
if (mark_interval < 1) {
path.AddLineToPoint (_timeline.width(), _y);
gc->StrokePath (path);
- DCPTime t = 0;
- while ((t * _timeline.pixels_per_time_unit()) < _timeline.width()) {
+ /* Time in seconds */
+ double t = 0;
+ while ((t * _timeline.pixels_per_second()) < _timeline.width()) {
wxGraphicsPath path = gc->CreatePath ();
path.MoveToPoint (time_x (t), _y - 4);
path.AddLineToPoint (time_x (t), _y + 4);
gc->StrokePath (path);
- int tc = t / TIME_HZ;
+ double tc = t;
int const h = tc / 3600;
tc -= h * 3600;
int const m = tc / 60;
wxDouble str_leading;
gc->GetTextExtent (str, &str_width, &str_height, &str_descent, &str_leading);
- int const tx = _timeline.x_offset() + t * _timeline.pixels_per_time_unit();
+ int const tx = _timeline.x_offset() + t * _timeline.pixels_per_second();
if ((tx + str_width) < _timeline.width()) {
gc->DrawText (str, time_x (t), _y + 16);
}
- t += mark_interval * TIME_HZ;
+ t += mark_interval;
}
}
};
-DCPTimeline::DCPTimeline (wxWindow* parent, FilmEditor* ed, shared_ptr<Film> film)
+Timeline::Timeline (wxWindow* parent, FilmEditor* ed, shared_ptr<Film> film)
: wxPanel (parent, wxID_ANY, wxDefaultPosition, wxDefaultSize, wxFULL_REPAINT_ON_RESIZE)
, _film_editor (ed)
, _film (film)
- , _time_axis_view (new DCPTimeAxisView (*this, 32))
+ , _time_axis_view (new TimeAxisView (*this, 32))
, _tracks (0)
- , _pixels_per_time_unit (0)
+ , _pixels_per_second (0)
, _left_down (false)
, _down_view_position (0)
, _first_move (false)
SetDoubleBuffered (true);
#endif
- Bind (wxEVT_PAINT, boost::bind (&DCPTimeline::paint, this));
- Bind (wxEVT_LEFT_DOWN, boost::bind (&DCPTimeline::left_down, this, _1));
- Bind (wxEVT_LEFT_UP, boost::bind (&DCPTimeline::left_up, this, _1));
- Bind (wxEVT_RIGHT_DOWN, boost::bind (&DCPTimeline::right_down, this, _1));
- Bind (wxEVT_MOTION, boost::bind (&DCPTimeline::mouse_moved, this, _1));
- Bind (wxEVT_SIZE, boost::bind (&DCPTimeline::resized, this));
+ Bind (wxEVT_PAINT, boost::bind (&Timeline::paint, this));
+ Bind (wxEVT_LEFT_DOWN, boost::bind (&Timeline::left_down, this, _1));
+ Bind (wxEVT_LEFT_UP, boost::bind (&Timeline::left_up, this, _1));
+ Bind (wxEVT_RIGHT_DOWN, boost::bind (&Timeline::right_down, this, _1));
+ Bind (wxEVT_MOTION, boost::bind (&Timeline::mouse_moved, this, _1));
+ Bind (wxEVT_SIZE, boost::bind (&Timeline::resized, this));
playlist_changed ();
SetMinSize (wxSize (640, tracks() * track_height() + 96));
- _playlist_connection = film->playlist()->Changed.connect (bind (&DCPTimeline::playlist_changed, this));
+ _playlist_connection = film->playlist()->Changed.connect (bind (&Timeline::playlist_changed, this));
}
void
-DCPTimeline::paint ()
+Timeline::paint ()
{
wxPaintDC dc (this);
}
void
-DCPTimeline::playlist_changed ()
+Timeline::playlist_changed ()
{
ensure_ui_thread ();
}
assign_tracks ();
- setup_pixels_per_time_unit ();
+ setup_pixels_per_second ();
Refresh ();
}
void
-DCPTimeline::assign_tracks ()
+Timeline::assign_tracks ()
{
for (ViewList::iterator i = _views.begin(); i != _views.end(); ++i) {
shared_ptr<ContentView> cv = dynamic_pointer_cast<ContentView> (*i);
}
int
-DCPTimeline::tracks () const
+Timeline::tracks () const
{
return _tracks;
}
void
-DCPTimeline::setup_pixels_per_time_unit ()
+Timeline::setup_pixels_per_second ()
{
shared_ptr<const Film> film = _film.lock ();
if (!film || film->length() == 0) {
return;
}
- _pixels_per_time_unit = static_cast<double>(width() - x_offset() * 2) / film->length ();
+ _pixels_per_second = static_cast<double>(width() - x_offset() * 2) / film->length().seconds ();
}
shared_ptr<View>
-DCPTimeline::event_to_view (wxMouseEvent& ev)
+Timeline::event_to_view (wxMouseEvent& ev)
{
ViewList::iterator i = _views.begin();
Position<int> const p (ev.GetX(), ev.GetY());
}
void
-DCPTimeline::left_down (wxMouseEvent& ev)
+Timeline::left_down (wxMouseEvent& ev)
{
shared_ptr<View> view = event_to_view (ev);
shared_ptr<ContentView> content_view = dynamic_pointer_cast<ContentView> (view);
}
void
-DCPTimeline::left_up (wxMouseEvent& ev)
+Timeline::left_up (wxMouseEvent& ev)
{
_left_down = false;
}
void
-DCPTimeline::mouse_moved (wxMouseEvent& ev)
+Timeline::mouse_moved (wxMouseEvent& ev)
{
if (!_left_down) {
return;
}
void
-DCPTimeline::right_down (wxMouseEvent& ev)
+Timeline::right_down (wxMouseEvent& ev)
{
shared_ptr<View> view = event_to_view (ev);
shared_ptr<ContentView> cv = dynamic_pointer_cast<ContentView> (view);
}
void
-DCPTimeline::set_position_from_event (wxMouseEvent& ev)
+Timeline::set_position_from_event (wxMouseEvent& ev)
{
wxPoint const p = ev.GetPosition();
return;
}
- DCPTime new_position = _down_view_position + (p.x - _down_point.x) / _pixels_per_time_unit;
+ DCPTime new_position = _down_view_position + DCPTime::from_seconds ((p.x - _down_point.x) / _pixels_per_second);
if (_snap) {
bool first = true;
- DCPTime nearest_distance = TIME_MAX;
- DCPTime nearest_new_position = TIME_MAX;
+ DCPTime nearest_distance = DCPTime::max ();
+ DCPTime nearest_new_position = DCPTime::max ();
/* Find the nearest content edge; this is inefficient */
for (ViewList::iterator i = _views.begin(); i != _views.end(); ++i) {
{
/* Snap starts to ends */
- DCPTime const d = abs (cv->content()->end() - new_position);
+ DCPTime const d = DCPTime (cv->content()->end() - new_position).abs ();
if (first || d < nearest_distance) {
nearest_distance = d;
nearest_new_position = cv->content()->end();
{
/* Snap ends to starts */
- DCPTime const d = abs (cv->content()->position() - (new_position + _down_view->content()->length_after_trim()));
+ DCPTime const d = DCPTime (
+ cv->content()->position() - (new_position + _down_view->content()->length_after_trim())
+ ).abs ();
+
if (d < nearest_distance) {
nearest_distance = d;
nearest_new_position = cv->content()->position() - _down_view->content()->length_after_trim ();
if (!first) {
/* Snap if it's close; `close' means within a proportion of the time on the timeline */
- if (nearest_distance < (width() / pixels_per_time_unit()) / 32) {
+ if (nearest_distance < DCPTime::from_seconds ((width() / pixels_per_second()) / 32)) {
new_position = nearest_new_position;
}
}
}
if (new_position < 0) {
- new_position = 0;
+ new_position = DCPTime ();
}
_down_view->content()->set_position (new_position);
}
void
-DCPTimeline::force_redraw (dcpomatic::Rect<int> const & r)
+Timeline::force_redraw (dcpomatic::Rect<int> const & r)
{
RefreshRect (wxRect (r.x, r.y, r.width, r.height), false);
}
shared_ptr<const Film>
-DCPTimeline::film () const
+Timeline::film () const
{
return _film.lock ();
}
void
-DCPTimeline::resized ()
+Timeline::resized ()
{
- setup_pixels_per_time_unit ();
+ setup_pixels_per_second ();
}
void
-DCPTimeline::clear_selection ()
+Timeline::clear_selection ()
{
for (ViewList::iterator i = _views.begin(); i != _views.end(); ++i) {
shared_ptr<ContentView> cv = dynamic_pointer_cast<ContentView> (*i);
}
}
-DCPTimeline::ContentViewList
-DCPTimeline::selected_views () const
+Timeline::ContentViewList
+Timeline::selected_views () const
{
ContentViewList sel;
}
ContentList
-DCPTimeline::selected_content () const
+Timeline::selected_content () const
{
ContentList sel;
ContentViewList views = selected_views ();
class View;
class ContentView;
class FilmEditor;
-class DCPTimeAxisView;
+class TimeAxisView;
-class DCPTimeline : public wxPanel
+class Timeline : public wxPanel
{
public:
- DCPTimeline (wxWindow *, FilmEditor *, boost::shared_ptr<Film>);
+ Timeline (wxWindow *, FilmEditor *, boost::shared_ptr<Film>);
boost::shared_ptr<const Film> film () const;
return 48;
}
- double pixels_per_time_unit () const {
- return _pixels_per_time_unit;
+ double pixels_per_second () const {
+ return _pixels_per_second;
}
Position<int> tracks_position () const {
int tracks () const;
- void setup_pixels_per_time_unit ();
+ void setup_pixels_per_second ();
void set_snap (bool s) {
_snap = s;
FilmEditor* _film_editor;
boost::weak_ptr<Film> _film;
ViewList _views;
- boost::shared_ptr<DCPTimeAxisView> _time_axis_view;
+ boost::shared_ptr<TimeAxisView> _time_axis_view;
int _tracks;
- double _pixels_per_time_unit;
+ double _pixels_per_second;
bool _left_down;
wxPoint _down_point;
boost::shared_ptr<ContentView> _down_view;
using std::cout;
using boost::shared_ptr;
-DCPTimelineDialog::DCPTimelineDialog (FilmEditor* ed, shared_ptr<Film> film)
- : wxDialog (ed, wxID_ANY, _("DCPTimeline"), wxDefaultPosition, wxSize (640, 512), wxDEFAULT_DIALOG_STYLE | wxRESIZE_BORDER | wxFULL_REPAINT_ON_RESIZE)
+TimelineDialog::TimelineDialog (FilmEditor* ed, shared_ptr<Film> film)
+ : wxDialog (ed, wxID_ANY, _("Timeline"), wxDefaultPosition, wxSize (640, 512), wxDEFAULT_DIALOG_STYLE | wxRESIZE_BORDER | wxFULL_REPAINT_ON_RESIZE)
, _timeline (this, ed, film)
{
wxBoxSizer* sizer = new wxBoxSizer (wxVERTICAL);
sizer->SetSizeHints (this);
_snap->SetValue (_timeline.snap ());
- _snap->Bind (wxEVT_COMMAND_CHECKBOX_CLICKED, boost::bind (&DCPTimelineDialog::snap_toggled, this));
+ _snap->Bind (wxEVT_COMMAND_CHECKBOX_CLICKED, boost::bind (&TimelineDialog::snap_toggled, this));
}
void
-DCPTimelineDialog::snap_toggled ()
+TimelineDialog::snap_toggled ()
{
_timeline.set_snap (_snap->GetValue ());
}
class Playlist;
-class DCPTimelineDialog : public wxDialog
+class TimelineDialog : public wxDialog
{
public:
- DCPTimelineDialog (FilmEditor *, boost::shared_ptr<Film>);
+ TimelineDialog (FilmEditor *, boost::shared_ptr<Film>);
private:
void snap_toggled ();
- DCPTimeline _timeline;
+ Timeline _timeline;
wxCheckBox* _snap;
};
if (content) {
_position->set (content->position (), _editor->film()->video_frame_rate ());
} else {
- _position->set (0, 24);
+ _position->set (DCPTime (), 24);
}
} else if (property == ContentProperty::LENGTH || property == VideoContentProperty::VIDEO_FRAME_RATE) {
if (content) {
_full_length->set (content->full_length (), _editor->film()->video_frame_rate ());
_play_length->set (content->length_after_trim (), _editor->film()->video_frame_rate ());
} else {
- _full_length->set (0, 24);
- _play_length->set (0, 24);
+ _full_length->set (DCPTime (), 24);
+ _play_length->set (DCPTime (), 24);
}
} else if (property == ContentProperty::TRIM_START) {
if (content) {
_trim_start->set (content->trim_start (), _editor->film()->video_frame_rate ());
_play_length->set (content->length_after_trim (), _editor->film()->video_frame_rate ());
} else {
- _trim_start->set (0, 24);
- _play_length->set (0, 24);
+ _trim_start->set (DCPTime (), 24);
+ _play_length->set (DCPTime (), 24);
}
} else if (property == ContentProperty::TRIM_END) {
if (content) {
_trim_end->set (content->trim_end (), _editor->film()->video_frame_rate ());
_play_length->set (content->length_after_trim (), _editor->film()->video_frame_rate ());
} else {
- _trim_end->set (0, 24);
- _play_length->set (0, 24);
+ _trim_end->set (DCPTime (), 24);
+ _play_length->set (DCPTime (), 24);
}
}
if (c.size() == 1) {
shared_ptr<ImageContent> ic = dynamic_pointer_cast<ImageContent> (c.front ());
if (ic && ic->still ()) {
- ic->set_video_length (rint (_full_length->get (_editor->film()->video_frame_rate()) * ic->video_frame_rate() / TIME_HZ));
+ /* XXX: No effective FRC here... is this right? */
+ ic->set_video_length (ContentTime (_full_length->get (_editor->film()->video_frame_rate()), FrameRateChange (1, 1)));
}
}
}
static shared_ptr<const AudioBuffers> last_audio;
-static int
-pass_through (int x)
-{
- return x;
-}
-
BOOST_AUTO_TEST_CASE (audio_merger_test1)
{
- AudioMerger<int, int> merger (1, bind (&pass_through, _1), boost::bind (&pass_through, _1));
+ int const frame_rate = 48000;
+ AudioMerger merger (1, frame_rate);
/* Push 64 samples, 0 -> 63 at time 0 */
shared_ptr<AudioBuffers> buffers (new AudioBuffers (1, 64));
for (int i = 0; i < 64; ++i) {
buffers->data()[0][i] = i;
}
- merger.push (buffers, 0);
+ merger.push (buffers, DCPTime ());
/* Push 64 samples, 0 -> 63 at time 22 */
- merger.push (buffers, 22);
+ merger.push (buffers, DCPTime::from_frames (22, frame_rate));
- TimedAudioBuffers<int> tb = merger.pull (22);
+ TimedAudioBuffers<DCPTime> tb = merger.pull (DCPTime::from_frames (22, frame_rate));
BOOST_CHECK (tb.audio != shared_ptr<const AudioBuffers> ());
BOOST_CHECK_EQUAL (tb.audio->frames(), 22);
BOOST_CHECK_EQUAL (tb.time, 0);
/* That flush should give us 64 samples at 22 */
BOOST_CHECK_EQUAL (tb.audio->frames(), 64);
- BOOST_CHECK_EQUAL (tb.time, 22);
+ BOOST_CHECK_EQUAL (tb.time, DCPTime::from_frames (22, frame_rate));
/* Check the sample values */
for (int i = 0; i < 64; ++i) {
BOOST_AUTO_TEST_CASE (audio_merger_test2)
{
- AudioMerger<int, int> merger (1, bind (&pass_through, _1), boost::bind (&pass_through, _1));
+ int const frame_rate = 48000;
+ AudioMerger merger (1, frame_rate);
/* Push 64 samples, 0 -> 63 at time 9 */
shared_ptr<AudioBuffers> buffers (new AudioBuffers (1, 64));
for (int i = 0; i < 64; ++i) {
buffers->data()[0][i] = i;
}
- merger.push (buffers, 9);
+ merger.push (buffers, DCPTime::from_frames (9, frame_rate));
- TimedAudioBuffers<int> tb = merger.pull (9);
+ TimedAudioBuffers<DCPTime> tb = merger.pull (DCPTime::from_frames (9, frame_rate));
BOOST_CHECK_EQUAL (tb.audio->frames(), 9);
BOOST_CHECK_EQUAL (tb.time, 0);
film->examine_and_add_content (contentB);
wait_for_jobs ();
- contentA->set_video_length (3);
- contentA->set_position (film->video_frames_to_time (2));
- contentB->set_video_length (1);
- contentB->set_position (film->video_frames_to_time (7));
+ contentA->set_video_length (ContentTime::from_frames (3, 24));
+ contentA->set_position (DCPTime::from_frames (2, film->video_frame_rate ()));
+ contentB->set_video_length (ContentTime::from_frames (1, 24));
+ contentB->set_position (DCPTime::from_frames (7, film->video_frame_rate ()));
film->make_dcp ();
print_time (DCPTime t, float fps)
{
stringstream s;
- s << t << " " << (float(t) / TIME_HZ) << "s " << (float(t) * fps / TIME_HZ) << "f";
+ s << t << " " << t.seconds() << "s " << t.frames (fps) << "f";
return s.str ();
}
BOOST_CHECK (first_video.get() >= t);
BOOST_CHECK (first_audio.get() >= t);
/* And should be rounded to frame boundaries */
- BOOST_CHECK ((first_video.get() % (TIME_HZ / film->video_frame_rate())) == 0);
- BOOST_CHECK ((first_audio.get() % (TIME_HZ / film->audio_frame_rate())) == 0);
+ BOOST_CHECK (first_video.get() == first_video.get().round_up (film->video_frame_rate()));
+ BOOST_CHECK (first_audio.get() == first_audio.get().round_up (film->audio_frame_rate()));
}
/* Test basic seeking */
player->Video.connect (boost::bind (&process_video, _1, _2, _3, _4, _5));
player->Audio.connect (boost::bind (&process_audio, _1, _2));
- check (player, 0);
- check (player, 0.1 * TIME_HZ);
- check (player, 0.2 * TIME_HZ);
- check (player, 0.3 * TIME_HZ);
+ check (player, DCPTime::from_seconds (0));
+ check (player, DCPTime::from_seconds (0.1));
+ check (player, DCPTime::from_seconds (0.2));
+ check (player, DCPTime::from_seconds (0.3));
}
print_time (DCPTime t, float fps)
{
stringstream s;
- s << t << " " << (float(t) / TIME_HZ) << "s " << (float(t) * fps / TIME_HZ) << "f";
+ s << t << " " << t.seconds() << "s " << t.frames(fps) << "f";
return s.str ();
}
player->Audio.connect (boost::bind (&process_audio, _1, _2));
for (float i = 0; i < 10; i += 0.1) {
- check (player, i * TIME_HZ);
+ check (player, DCPTime::from_seconds (i));
}
}
BOOST_CHECK_EQUAL (A->position(), 0);
/* A is 16 frames long at 25 fps */
- BOOST_CHECK_EQUAL (B->position(), 16 * TIME_HZ / 25);
+ BOOST_CHECK_EQUAL (B->position(), DCPTime::from_frames (16, 25));
shared_ptr<Player> player = film->make_player ();
PlayerWrapper wrap (player);
}
}
- player->seek (6 * TIME_HZ / 25, true);
+ player->seek (DCPTime::from_frames (6, 25), true);
optional<Video> v = wrap.get_video ();
BOOST_CHECK (v);
- BOOST_CHECK_EQUAL (v.get().time, 6 * TIME_HZ / 25);
+ BOOST_CHECK_EQUAL (v.get().time, DCPTime::from_frames (6, 25));
}
#endif
wait_for_jobs ();
- imc->set_video_length (1);
+ imc->set_video_length (ContentTime::from_frames (1, 24));
scaling_test_for (film, imc, "133", "185");
scaling_test_for (film, imc, "185", "185");
FFmpegDecoder decoder (film, content, true, false);
shared_ptr<DecodedVideo> a = dynamic_pointer_cast<DecodedVideo> (decoder.peek ());
- decoder.seek (0, true);
+ decoder.seek (ContentTime(), true);
shared_ptr<DecodedVideo> b = dynamic_pointer_cast<DecodedVideo> (decoder.peek ());
/* a will be after no seek, and b after a seek to zero, which should
have the same effect.
*/
- BOOST_CHECK_EQUAL (a->frame, b->frame);
+ BOOST_CHECK_EQUAL (a->content_time, b->content_time);
}
/** Test SubRip::convert_time */
BOOST_AUTO_TEST_CASE (subrip_time_test)
{
- BOOST_CHECK_EQUAL (SubRip::convert_time ("00:03:10,500"), rint (((3 * 60) + 10 + 0.5) * TIME_HZ));
- BOOST_CHECK_EQUAL (SubRip::convert_time ("04:19:51,782"), rint (((4 * 3600) + (19 * 60) + 51 + 0.782) * TIME_HZ));
+ BOOST_CHECK_EQUAL (SubRip::convert_time ("00:03:10,500"), ContentTime::from_seconds ((3 * 60) + 10 + 0.5));
+ BOOST_CHECK_EQUAL (SubRip::convert_time ("04:19:51,782"), ContentTime::from_seconds ((4 * 3600) + (19 * 60) + 51 + 0.782));
}
/** Test SubRip::convert_coordinate */
{
shared_ptr<SubRipContent> content (new SubRipContent (shared_ptr<Film> (), "test/data/subrip.srt"));
content->examine (shared_ptr<Job> ());
- BOOST_CHECK_EQUAL (content->full_length(), ((3 * 60) + 56.471) * TIME_HZ);
+ BOOST_CHECK_EQUAL (content->full_length(), DCPTime::from_seconds ((3 * 60) + 56.471));
SubRip s (content);
vector<SubRipSubtitle>::const_iterator i = s._subtitles.begin();
BOOST_CHECK (i != s._subtitles.end ());
- BOOST_CHECK_EQUAL (i->from, ((1 * 60) + 49.200) * TIME_HZ);
- BOOST_CHECK_EQUAL (i->to, ((1 * 60) + 52.351) * TIME_HZ);
+ BOOST_CHECK_EQUAL (i->from, DCPTime::from_seconds ((1 * 60) + 49.200));
+ BOOST_CHECK_EQUAL (i->to, DCPTime::from_seconds ((1 * 60) + 52.351));
BOOST_CHECK_EQUAL (i->pieces.size(), 1);
BOOST_CHECK_EQUAL (i->pieces.front().text, "This is a subtitle, and it goes over two lines.");
++i;
BOOST_CHECK (i != s._subtitles.end ());
- BOOST_CHECK_EQUAL (i->from, ((1 * 60) + 52.440) * TIME_HZ);
- BOOST_CHECK_EQUAL (i->to, ((1 * 60) + 54.351) * TIME_HZ);
+ BOOST_CHECK_EQUAL (i->from, DCPTime::from_seconds ((1 * 60) + 52.440));
+ BOOST_CHECK_EQUAL (i->to, DCPTime::from_seconds ((1 * 60) + 54.351));
BOOST_CHECK_EQUAL (i->pieces.size(), 1);
BOOST_CHECK_EQUAL (i->pieces.front().text, "We have emboldened this");
BOOST_CHECK_EQUAL (i->pieces.front().bold, true);
++i;
BOOST_CHECK (i != s._subtitles.end ());
- BOOST_CHECK_EQUAL (i->from, ((1 * 60) + 54.440) * TIME_HZ);
- BOOST_CHECK_EQUAL (i->to, ((1 * 60) + 56.590) * TIME_HZ);
+ BOOST_CHECK_EQUAL (i->from, DCPTime::from_seconds ((1 * 60) + 54.440));
+ BOOST_CHECK_EQUAL (i->to, DCPTime::from_seconds ((1 * 60) + 56.590));
BOOST_CHECK_EQUAL (i->pieces.size(), 1);
BOOST_CHECK_EQUAL (i->pieces.front().text, "And italicised this.");
BOOST_CHECK_EQUAL (i->pieces.front().italic, true);
++i;
BOOST_CHECK (i != s._subtitles.end ());
- BOOST_CHECK_EQUAL (i->from, ((1 * 60) + 56.680) * TIME_HZ);
- BOOST_CHECK_EQUAL (i->to, ((1 * 60) + 58.955) * TIME_HZ);
+ BOOST_CHECK_EQUAL (i->from, DCPTime::from_seconds ((1 * 60) + 56.680));
+ BOOST_CHECK_EQUAL (i->to, DCPTime::from_seconds ((1 * 60) + 58.955));
BOOST_CHECK_EQUAL (i->pieces.size(), 1);
BOOST_CHECK_EQUAL (i->pieces.front().text, "Shall I compare thee to a summers' day?");
++i;
BOOST_CHECK (i != s._subtitles.end ());
- BOOST_CHECK_EQUAL (i->from, ((2 * 60) + 0.840) * TIME_HZ);
- BOOST_CHECK_EQUAL (i->to, ((2 * 60) + 3.400) * TIME_HZ);
+ BOOST_CHECK_EQUAL (i->from, DCPTime::from_seconds ((2 * 60) + 0.840));
+ BOOST_CHECK_EQUAL (i->to, DCPTime::from_seconds ((2 * 60) + 3.400));
BOOST_CHECK_EQUAL (i->pieces.size(), 1);
BOOST_CHECK_EQUAL (i->pieces.front().text, "Is this a dagger I see before me?");
++i;
BOOST_CHECK (i != s._subtitles.end ());
- BOOST_CHECK_EQUAL (i->from, ((3 * 60) + 54.560) * TIME_HZ);
- BOOST_CHECK_EQUAL (i->to, ((3 * 60) + 56.471) * TIME_HZ);
+ BOOST_CHECK_EQUAL (i->from, DCPTime::from_seconds ((3 * 60) + 54.560));
+ BOOST_CHECK_EQUAL (i->to, DCPTime::from_seconds ((3 * 60) + 56.471));
BOOST_CHECK_EQUAL (i->pieces.size(), 1);
BOOST_CHECK_EQUAL (i->pieces.front().text, "Hello world.");
{
shared_ptr<SubRipContent> content (new SubRipContent (shared_ptr<Film> (), "test/data/subrip.srt"));
content->examine (shared_ptr<Job> ());
- BOOST_CHECK_EQUAL (content->full_length(), ((3 * 60) + 56.471) * TIME_HZ);
+ BOOST_CHECK_EQUAL (content->full_length(), DCPTime::from_seconds ((3 * 60) + 56.471));
shared_ptr<Film> film = new_test_film ("subrip_render_test");
BOOST_CHECK_THROW (md5_digest (p, shared_ptr<Job> ()), std::runtime_error);
}
-/* Straightforward test of time_round_up_test */
-BOOST_AUTO_TEST_CASE (time_round_up_test)
+/* Straightforward test of DCPTime::round_up */
+BOOST_AUTO_TEST_CASE (dcptime_round_up_test)
{
- BOOST_CHECK_EQUAL (time_round_up (0, 2), 0);
- BOOST_CHECK_EQUAL (time_round_up (1, 2), 2);
- BOOST_CHECK_EQUAL (time_round_up (2, 2), 2);
- BOOST_CHECK_EQUAL (time_round_up (3, 2), 4);
+ BOOST_CHECK_EQUAL (DCPTime (0).round_up (DCPTime::HZ / 2), 0);
+ BOOST_CHECK_EQUAL (DCPTime (1).round_up (DCPTime::HZ / 2), 2);
+ BOOST_CHECK_EQUAL (DCPTime (2).round_up (DCPTime::HZ / 2), 2);
+ BOOST_CHECK_EQUAL (DCPTime (3).round_up (DCPTime::HZ / 2), 4);
- BOOST_CHECK_EQUAL (time_round_up (0, 42), 0);
- BOOST_CHECK_EQUAL (time_round_up (1, 42), 42);
- BOOST_CHECK_EQUAL (time_round_up (42, 42), 42);
- BOOST_CHECK_EQUAL (time_round_up (43, 42), 84);
+ BOOST_CHECK_EQUAL (DCPTime (0).round_up (DCPTime::HZ / 42), 0);
+ BOOST_CHECK_EQUAL (DCPTime (1).round_up (DCPTime::HZ / 42), 42);
+ BOOST_CHECK_EQUAL (DCPTime (42).round_up (DCPTime::HZ / 42), 42);
+ BOOST_CHECK_EQUAL (DCPTime (43).round_up (DCPTime::HZ / 42), 84);
}