string
AudioContent::technical_summary () const
{
- return String::compose ("audio: channels %1, length %2, raw rate %3, out rate %4", audio_channels(), audio_length(), content_audio_frame_rate(), output_audio_frame_rate());
+ return String::compose (
+ "audio: channels %1, length %2, raw rate %3, out rate %4",
+ audio_channels(),
+ audio_length().seconds(),
+ content_audio_frame_rate(),
+ output_audio_frame_rate()
+ );
}
, _change_signals_frequent (false)
{
for (size_t i = 0; i < c.size(); ++i) {
- if (i > 0 && c[i]->trim_start ()) {
+ if (i > 0 && c[i]->trim_start() > DCPTime()) {
throw JoinError (_("Only the first piece of content to be joined can have a start trim."));
}
- if (i < (c.size() - 1) && c[i]->trim_end ()) {
+ if (i < (c.size() - 1) && c[i]->trim_end () > DCPTime()) {
throw JoinError (_("Only the last piece of content to be joined can have an end trim."));
}
string
Content::technical_summary () const
{
- return String::compose ("%1 %2 %3", path_summary(), digest(), position());
+ return String::compose ("%1 %2 %3", path_summary(), digest(), position().seconds());
}
DCPTime
stringstream s;
s << Content::digest()
- << "_" << position()
- << "_" << trim_start()
- << "_" << trim_end();
+ << "_" << position().get()
+ << "_" << trim_start().get()
+ << "_" << trim_end().get();
return s.str ();
}
#include "dcpomatic_time.h"
+using std::ostream;
+
ContentTime::ContentTime (DCPTime d, FrameRateChange f)
: Time (rint (d.get() * f.speed_up))
{
return b;
}
+
+ostream &
+operator<< (ostream& s, ContentTime t)
+{
+ s << "[CONT " << t.get() << " " << t.seconds() << "s]";
+ return s;
+}
+
+ostream &
+operator<< (ostream& s, DCPTime t)
+{
+ s << "[DCP " << t.get() << " " << t.seconds() << "s]";
+ return s;
+}
#define DCPOMATIC_TIME_H
#include <cmath>
+#include <ostream>
#include <stdint.h>
#include "frame_rate_change.h"
return rint (_t * r / HZ);
}
- operator bool () const {
- return _t != 0;
- }
-
protected:
friend class dcptime_round_up_test;
return *this;
}
+ ContentTime operator- () const {
+ return ContentTime (-_t);
+ }
+
ContentTime operator- (ContentTime const & o) const {
return ContentTime (_t - o._t);
}
return *this;
}
+ /** Round this time up to the nearest sampling interval
+ * at some sampling rate.
+ * @param r Sampling rate.
+ * @return Rounded time; unchanged if already on an interval boundary.
+ */
+ ContentTime round_up (int r) const {
+ int64_t const n = HZ / r;
+ int64_t const a = _t + n - 1;
+ return ContentTime (a - (a % n));
+ }
+
static ContentTime from_seconds (double s) {
return ContentTime (s * HZ);
}
}
};
+std::ostream& operator<< (std::ostream& s, ContentTime t);
+
class DCPTime : public Time
{
public:
};
DCPTime min (DCPTime a, DCPTime b);
+std::ostream& operator<< (std::ostream& s, DCPTime t);
#endif
}
if (_first_video) {
- node->add_child("FirstVideo")->add_child_text (lexical_cast<string> (_first_video.get ()));
+ node->add_child("FirstVideo")->add_child_text (lexical_cast<string> (_first_video.get().get()));
}
}
assert (film);
shared_ptr<FFmpegExaminer> examiner (new FFmpegExaminer (shared_from_this ()));
+ take_from_video_examiner (examiner);
ContentTime video_length = examiner->video_length ();
film->log()->log (String::compose ("Video length obtained from header as %1 frames", video_length.frames (video_frame_rate ())));
_first_video = examiner->first_video ();
}
- take_from_video_examiner (examiner);
-
signal_changed (ContentProperty::LENGTH);
signal_changed (FFmpegContentProperty::SUBTITLE_STREAMS);
signal_changed (FFmpegContentProperty::SUBTITLE_STREAM);
string
FFmpegContent::information () const
{
- if (video_length() == ContentTime (0) || video_frame_rate() == ContentTime (0)) {
+ if (video_length() == ContentTime (0) || video_frame_rate() == 0) {
return "";
}
stringstream s;
- s << String::compose (_("%1 frames; %2 frames per second"), video_length(), video_frame_rate()) << "\n";
+ s << String::compose (_("%1 frames; %2 frames per second"), video_length().frames (video_frame_rate()), video_frame_rate()) << "\n";
s << VideoContent::information ();
return s.str ();
root->add_child("FrameRate")->add_child_text (lexical_cast<string> (frame_rate));
root->add_child("Channels")->add_child_text (lexical_cast<string> (channels));
if (first_audio) {
- root->add_child("FirstAudio")->add_child_text (lexical_cast<string> (first_audio.get ()));
+ root->add_child("FirstAudio")->add_child_text (lexical_cast<string> (first_audio.get().get()));
}
mapping.as_xml (root->add_child("Mapping"));
}
int frame_rate;
int channels;
AudioMapping mapping;
- boost::optional<double> first_audio;
+ boost::optional<ContentTime> first_audio;
private:
friend class ffmpeg_pts_offset_test;
void set_subtitle_stream (boost::shared_ptr<FFmpegSubtitleStream>);
void set_audio_stream (boost::shared_ptr<FFmpegAudioStream>);
- boost::optional<double> first_video () const {
+ boost::optional<ContentTime> first_video () const {
boost::mutex::scoped_lock lm (_mutex);
return _first_video;
}
boost::shared_ptr<FFmpegSubtitleStream> _subtitle_stream;
std::vector<boost::shared_ptr<FFmpegAudioStream> > _audio_streams;
boost::shared_ptr<FFmpegAudioStream> _audio_stream;
- boost::optional<double> _first_video;
+ boost::optional<ContentTime> _first_video;
/** Video filters that should be used when generating DCPs */
std::vector<Filter const *> _filters;
};
/* Now adjust both so that the video pts starts on a frame */
if (have_video && have_audio) {
- double first_video = c->first_video().get() + _pts_offset;
- double const old_first_video = first_video;
-
- /* Round the first video up to a frame boundary */
- if (fabs (rint (first_video * c->video_frame_rate()) - first_video * c->video_frame_rate()) > 1e-6) {
- first_video = ceil (first_video * c->video_frame_rate()) / c->video_frame_rate ();
- }
-
- _pts_offset += first_video - old_first_video;
+ ContentTime first_video = c->first_video().get() + _pts_offset;
+ ContentTime const old_first_video = first_video;
+ _pts_offset += first_video.round_up (c->video_frame_rate ()) - old_first_video;
}
}
int finished = 0;
r = avcodec_decode_video2 (video_codec_context(), _frame, &finished, &_packet);
if (r >= 0 && finished) {
- last_video = ContentTime::from_seconds (av_frame_get_best_effort_timestamp (_frame) * time_base + _pts_offset);
+ last_video = ContentTime::from_seconds (av_frame_get_best_effort_timestamp (_frame) * time_base) + _pts_offset;
}
} else if (_ffmpeg_content->audio_stream() && _ffmpeg_content->audio_stream()->uses_index (_format_context, _packet.stream_index)) {
int finished;
r = avcodec_decode_audio4 (audio_codec_context(), _frame, &finished, &_packet);
if (r >= 0 && finished) {
- last_audio = ContentTime::from_seconds (av_frame_get_best_effort_timestamp (_frame) * time_base + _pts_offset);
+ last_audio = ContentTime::from_seconds (av_frame_get_best_effort_timestamp (_frame) * time_base) + _pts_offset;
}
copy_packet.data += r;
void
FFmpegDecoder::seek_and_flush (ContentTime t)
{
- int64_t s = (t.seconds() - _pts_offset) / av_q2d (_format_context->streams[_video_stream]->time_base);
+ ContentTime const u = t - _pts_offset;
+ int64_t s = u.seconds() / av_q2d (_format_context->streams[_video_stream]->time_base);
if (_ffmpeg_content->audio_stream ()) {
s = min (
- s, int64_t ((t.seconds() - _pts_offset) / av_q2d (_ffmpeg_content->audio_stream()->stream(_format_context)->time_base))
+ s, int64_t (u.seconds() / av_q2d (_ffmpeg_content->audio_stream()->stream(_format_context)->time_base))
);
}
if (frame_finished) {
ContentTime const ct = ContentTime::from_seconds (
av_frame_get_best_effort_timestamp (_frame) *
- av_q2d (_ffmpeg_content->audio_stream()->stream (_format_context)->time_base)
- + _pts_offset
- );
+ av_q2d (_ffmpeg_content->audio_stream()->stream (_format_context)->time_base))
+ + _pts_offset;
int const data_size = av_samples_get_buffer_size (
0, audio_codec_context()->channels, _frame->nb_samples, audio_sample_format (), 1
}
if (i->second != AV_NOPTS_VALUE) {
- video (image, false, ContentTime::from_seconds (i->second * av_q2d (_format_context->streams[_video_stream]->time_base) + _pts_offset));
+ video (image, false, ContentTime::from_seconds (i->second * av_q2d (_format_context->streams[_video_stream]->time_base)) + _pts_offset);
} else {
shared_ptr<const Film> film = _film.lock ();
assert (film);
throw DecodeError (_("multi-part subtitles not yet supported"));
}
- /* Subtitle PTS in seconds (within the source, not taking into account any of the
+ /* Subtitle PTS (within the source, not taking into account any of the
source that we may have chopped off for the DCP)
*/
- double const packet_time = (static_cast<double> (sub.pts) / AV_TIME_BASE) + _pts_offset;
+ ContentTime packet_time = ContentTime::from_seconds (static_cast<double> (sub.pts) / AV_TIME_BASE) + _pts_offset;
/* hence start time for this sub */
- ContentTime const from = ContentTime::from_seconds (packet_time + (double (sub.start_display_time) / 1e3));
- ContentTime const to = ContentTime::from_seconds (packet_time + (double (sub.end_display_time) / 1e3));
+ ContentTime const from = packet_time + ContentTime::from_seconds (sub.start_display_time / 1e3);
+ ContentTime const to = packet_time + ContentTime::from_seconds (sub.end_display_time / 1e3);
AVSubtitleRect const * rect = sub.rects[0];
bool _decode_video;
bool _decode_audio;
- double _pts_offset;
+ ContentTime _pts_offset;
};
AVStream* s = _format_context->streams[_video_stream];
if (s->avg_frame_rate.num && s->avg_frame_rate.den) {
return av_q2d (s->avg_frame_rate);
}
FFmpegExaminer::video_length () const
{
ContentTime const length = ContentTime::from_seconds (double (_format_context->duration) / AV_TIME_BASE);
- return ContentTime (1, length.get ());
+ return ContentTime (max (int64_t (1), length.get ()));
}
string
{
stringstream s;
s << VideoContent::identifier ();
- s << "_" << video_length();
+ s << "_" << video_length().get();
return s.str ();
}
dec->set_dcp_times ((*i)->frc, offset);
DCPTime const t = dec->dcp_time - offset;
- cout << "Peeked " << (*i)->content->paths()[0] << " for " << t << " cf " << ((*i)->content->full_length() - (*i)->content->trim_end ()) << "\n";
if (t >= ((*i)->content->full_length() - (*i)->content->trim_end ())) {
/* In the end-trimmed part; decoder has nothing else to give us */
dec.reset ();
void
Player::emit_silence (DCPTime most)
{
- if (most == 0) {
+ if (most == DCPTime ()) {
return;
}
PlayerStatistics::dump (shared_ptr<Log> log) const
{
log->log (String::compose ("Video: %1 good %2 skipped %3 black %4 repeat", video.good, video.skip, video.black, video.repeat));
- log->log (String::compose ("Audio: %1 good %2 skipped %3 silence", audio.good, audio.skip, audio.silence));
+ log->log (String::compose ("Audio: %1 good %2 skipped %3 silence", audio.good, audio.skip, audio.silence.seconds()));
}
PlayerStatistics const &
/*
- Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
, skip (0)
{}
- int64_t silence;
+ DCPTime silence;
int64_t good;
int64_t skip;
} audio;
_("%1 channels, %2kHz, %3 samples"),
audio_channels(),
content_audio_frame_rate() / 1000.0,
- audio_length()
+ audio_length().frames (content_audio_frame_rate ())
);
return s.str ();
AudioContent::as_xml (node);
node->add_child("AudioChannels")->add_child_text (lexical_cast<string> (audio_channels ()));
- node->add_child("AudioLength")->add_child_text (lexical_cast<string> (audio_length ()));
+ node->add_child("AudioLength")->add_child_text (lexical_cast<string> (audio_length().get ()));
node->add_child("AudioFrameRate")->add_child_text (lexical_cast<string> (content_audio_frame_rate ()));
_audio_mapping.as_xml (node->add_child("AudioMapping"));
}
node->add_child("Type")->add_child_text ("SubRip");
Content::as_xml (node);
SubtitleContent::as_xml (node);
- node->add_child("Length")->add_child_text (lexical_cast<string> (_length));
+ node->add_child("Length")->add_child_text (lexical_cast<string> (_length.get ()));
}
DCPTime
VideoContent::as_xml (xmlpp::Node* node) const
{
boost::mutex::scoped_lock lm (_mutex);
- node->add_child("VideoLength")->add_child_text (lexical_cast<string> (_video_length));
+ node->add_child("VideoLength")->add_child_text (lexical_cast<string> (_video_length.get ()));
node->add_child("VideoWidth")->add_child_text (lexical_cast<string> (_video_size.width));
node->add_child("VideoHeight")->add_child_text (lexical_cast<string> (_video_size.height));
node->add_child("VideoFrameRate")->add_child_text (lexical_cast<string> (_video_frame_rate));
/* These examiner calls could call other content methods which take a lock on the mutex */
dcp::Size const vs = d->video_size ();
float const vfr = d->video_frame_rate ();
{
boost::mutex::scoped_lock lm (_mutex);
_video_size = vs;
_video_frame_rate = vfr;
}
signal_changed (VideoContentProperty::VIDEO_SIZE);
string
VideoContent::technical_summary () const
{
- return String::compose ("video: length %1, size %2x%3, rate %4", video_length(), video_size().width, video_size().height, video_frame_rate());
+ return String::compose (
+ "video: length %1, size %2x%3, rate %4",
+ video_length().seconds(),
+ video_size().width,
+ video_size().height,
+ video_frame_rate()
+ );
}
dcp::Size
_last_written_frame = qi.frame;
_last_written_eyes = qi.eyes;
- if (_film->length()) {
- shared_ptr<Job> job = _job.lock ();
- assert (job);
- int64_t total = _film->length().frames (_film->video_frame_rate ());
- if (_film->three_d ()) {
- /* _full_written and so on are incremented for each eye, so we need to double the total
- frames to get the correct progress.
- */
- total *= 2;
- }
+ shared_ptr<Job> job = _job.lock ();
+ assert (job);
+ int64_t total = _film->length().frames (_film->video_frame_rate ());
+ if (_film->three_d ()) {
+ /* _full_written and so on are incremented for each eye, so we need to double the total
+ frames to get the correct progress.
+ */
+ total *= 2;
+ }
+ if (total) {
job->set_progress (float (_full_written + _fake_written + _repeat_written) / total);
}
}
DCPTime const len = _film->length ();
- if (len) {
- int const new_slider_position = 4096 * _player->video_position() / len;
+ if (len.get ()) {
+ int const new_slider_position = 4096 * _player->video_position().get() / len.get();
if (new_slider_position != _slider->GetValue()) {
_slider->SetValue (new_slider_position);
}
*/
DCPTime p = _player->video_position() - DCPTime::from_frames (2, _film->video_frame_rate ());
- if (p < 0) {
+ if (p < DCPTime ()) {
p = DCPTime ();
}
} catch (boost::thread_interrupted &) {
return "";
}
-
- if (_film->length()) {
+
+ uint64_t const frames = _film->length().frames (_film->video_frame_rate ());
+ if (frames) {
/* XXX: encoded_frames() should check which frames have been encoded */
- u << " (" << (_film->encoded_frames() * 100 / _film->length().frames (_film->video_frame_rate ())) << "%)";
+ u << " (" << (_film->encoded_frames() * 100 / frames) << "%)";
}
return u.str ();
}
protected:
virtual void do_paint (wxGraphicsContext *) = 0;
- int time_x (double t) const
+ int time_x (DCPTime t) const
{
- return _timeline.tracks_position().x + t * _timeline.pixels_per_second ();
+ return _timeline.tracks_position().x + t.seconds() * _timeline.pixels_per_second ();
}
Timeline& _timeline;
gc->StrokePath (path);
/* Time in seconds */
- double t;
- while ((t * _timeline.pixels_per_second()) < _timeline.width()) {
+ DCPTime t;
+ while ((t.seconds() * _timeline.pixels_per_second()) < _timeline.width()) {
wxGraphicsPath path = gc->CreatePath ();
path.MoveToPoint (time_x (t), _y - 4);
path.AddLineToPoint (time_x (t), _y + 4);
gc->StrokePath (path);
- double tc = t;
+ double tc = t.seconds ();
int const h = tc / 3600;
tc -= h * 3600;
int const m = tc / 60;
wxDouble str_leading;
gc->GetTextExtent (str, &str_width, &str_height, &str_descent, &str_leading);
- int const tx = _timeline.x_offset() + t * _timeline.pixels_per_second();
+ int const tx = _timeline.x_offset() + t.seconds() * _timeline.pixels_per_second();
if ((tx + str_width) < _timeline.width()) {
gc->DrawText (str, time_x (t), _y + 16);
}
- t += mark_interval;
+ t += DCPTime::from_seconds (mark_interval);
}
}
Timeline::setup_pixels_per_second ()
{
shared_ptr<const Film> film = _film.lock ();
- if (!film || film->length() == 0) {
+ if (!film || film->length() == DCPTime ()) {
return;
}
if (!first) {
/* Snap if it's close; `close' means within a proportion of the time on the timeline */
- if (nearest_distance < (width() / pixels_per_second()) / 32) {
+ if (nearest_distance < DCPTime::from_seconds ((width() / pixels_per_second()) / 32)) {
new_position = nearest_new_position;
}
}
}
- if (new_position < 0) {
+ if (new_position < DCPTime ()) {
new_position = DCPTime ();
}
AudioAnalysis b ("build/test/audio_analysis_test");
for (int i = 0; i < channels; ++i) {
- BOOST_CHECK (b.points(i) == points);
+ BOOST_CHECK_EQUAL (b.points(i), points);
for (int j = 0; j < points; ++j) {
AudioPoint p = b.get_point (i, j);
BOOST_CHECK_CLOSE (p[AudioPoint::PEAK], random_float (), 1);
TimedAudioBuffers<DCPTime> tb = merger.pull (DCPTime::from_frames (22, frame_rate));
BOOST_CHECK (tb.audio != shared_ptr<const AudioBuffers> ());
BOOST_CHECK_EQUAL (tb.audio->frames(), 22);
- BOOST_CHECK_EQUAL (tb.time, 0);
+ BOOST_CHECK_EQUAL (tb.time, DCPTime ());
/* And they should be a staircase */
for (int i = 0; i < 22; ++i) {
TimedAudioBuffers<DCPTime> tb = merger.pull (DCPTime::from_frames (9, frame_rate));
BOOST_CHECK_EQUAL (tb.audio->frames(), 9);
- BOOST_CHECK_EQUAL (tb.time, 0);
+ BOOST_CHECK_EQUAL (tb.time, DCPTime ());
for (int i = 0; i < 9; ++i) {
BOOST_CHECK_EQUAL (tb.audio->data()[0][i], 0);
/* That flush should give us 64 samples at 9 */
BOOST_CHECK_EQUAL (tb.audio->frames(), 64);
- BOOST_CHECK_EQUAL (tb.time, 9);
+ BOOST_CHECK_EQUAL (tb.time, DCPTime::from_frames (9, frame_rate));
/* Check the sample values */
for (int i = 0; i < 64; ++i) {
BOOST_CHECK (remotely_encoded);
BOOST_CHECK_EQUAL (locally_encoded->size(), remotely_encoded->size());
- BOOST_CHECK (memcmp (locally_encoded->data(), remotely_encoded->data(), locally_encoded->size()) == 0);
+ BOOST_CHECK_EQUAL (memcmp (locally_encoded->data(), remotely_encoded->data(), locally_encoded->size()), 0);
}
BOOST_AUTO_TEST_CASE (client_server_test)
shared_ptr<const dcp::ReelSoundAsset> sound_asset = check.cpls().front()->reels().front()->main_sound ();
BOOST_CHECK (sound_asset);
- BOOST_CHECK (sound_asset->mxf()->channels () == 6);
+ BOOST_CHECK_EQUAL (sound_asset->mxf()->channels (), 6);
/* Sample index in the DCP */
int n = 0;
shared_ptr<FFmpegContent> content (new FFmpegContent (film, "test/data/count300bd24.m2ts"));
shared_ptr<FFmpegExaminer> examiner (new FFmpegExaminer (content));
- BOOST_CHECK_EQUAL (examiner->first_video().get(), 600);
+ BOOST_CHECK_EQUAL (examiner->first_video().get(), ContentTime (600));
BOOST_CHECK_EQUAL (examiner->audio_streams().size(), 1);
- BOOST_CHECK_EQUAL (examiner->audio_streams()[0]->first_audio.get(), 600);
+ BOOST_CHECK_EQUAL (examiner->audio_streams()[0]->first_audio.get(), ContentTime (600));
}
{
/* Sound == video so no offset required */
- content->_first_video = 0;
- content->_audio_stream->first_audio = 0;
+ content->_first_video = ContentTime ();
+ content->_audio_stream->first_audio = ContentTime ();
FFmpegDecoder decoder (film, content, true, true);
- BOOST_CHECK_EQUAL (decoder._pts_offset, 0);
+ BOOST_CHECK_EQUAL (decoder._pts_offset, ContentTime ());
}
{
/* Common offset should be removed */
- content->_first_video = 600;
- content->_audio_stream->first_audio = 600;
+ content->_first_video = ContentTime::from_seconds (600);
+ content->_audio_stream->first_audio = ContentTime::from_seconds (600);
FFmpegDecoder decoder (film, content, true, true);
- BOOST_CHECK_EQUAL (decoder._pts_offset, -600);
+ BOOST_CHECK_EQUAL (decoder._pts_offset, ContentTime::from_seconds (-600));
}
{
/* Video is on a frame boundary */
- content->_first_video = 1.0 / 24.0;
- content->_audio_stream->first_audio = 0;
+ content->_first_video = ContentTime::from_frames (1, 24);
+ content->_audio_stream->first_audio = ContentTime ();
FFmpegDecoder decoder (film, content, true, true);
- BOOST_CHECK_EQUAL (decoder._pts_offset, 0);
+ BOOST_CHECK_EQUAL (decoder._pts_offset, ContentTime ());
}
{
/* Video is off a frame boundary */
double const frame = 1.0 / 24.0;
- content->_first_video = frame + 0.0215;
- content->_audio_stream->first_audio = 0;
+ content->_first_video = ContentTime::from_seconds (frame + 0.0215);
+ content->_audio_stream->first_audio = ContentTime ();
FFmpegDecoder decoder (film, content, true, true);
- BOOST_CHECK_CLOSE (decoder._pts_offset, (frame - 0.0215), 0.00001);
+ BOOST_CHECK_CLOSE (decoder._pts_offset.seconds(), (frame - 0.0215), 0.00001);
}
{
/* Video is off a frame boundary and both have a common offset */
double const frame = 1.0 / 24.0;
- content->_first_video = frame + 0.0215 + 4.1;
- content->_audio_stream->first_audio = 4.1;
+ content->_first_video = ContentTime::from_seconds (frame + 0.0215 + 4.1);
+ content->_audio_stream->first_audio = ContentTime::from_seconds (4.1);
FFmpegDecoder decoder (film, content, true, true);
- BOOST_CHECK_EQUAL (decoder._pts_offset, (frame - 0.0215) - 4.1);
+ BOOST_CHECK_EQUAL (decoder._pts_offset.seconds(), (frame - 0.0215) - 4.1);
}
}
print_time (DCPTime t, float fps)
{
stringstream s;
- s << t << " " << t.seconds() << "s " << t.frames (fps) << "f";
+ s << t.seconds() << "s " << t.frames (fps) << "f";
return s.str ();
}
BOOST_CHECK (first_video.get() >= t);
BOOST_CHECK (first_audio.get() >= t);
/* And should be rounded to frame boundaries */
- BOOST_CHECK (first_video.get() == first_video.get().round_up (film->video_frame_rate()));
- BOOST_CHECK (first_audio.get() == first_audio.get().round_up (film->audio_frame_rate()));
+ BOOST_CHECK_EQUAL (first_video.get(), first_video.get().round_up (film->video_frame_rate()));
+ BOOST_CHECK_EQUAL (first_audio.get(), first_audio.get().round_up (film->audio_frame_rate()));
}
/* Test basic seeking */
BOOST_CHECK (t->data() != s->data());
BOOST_CHECK (t->data()[0] != s->data()[0]);
BOOST_CHECK (t->line_size() != s->line_size());
- BOOST_CHECK (t->line_size()[0] == s->line_size()[0]);
+ BOOST_CHECK_EQUAL (t->line_size()[0], s->line_size()[0]);
BOOST_CHECK (t->stride() != s->stride());
- BOOST_CHECK (t->stride()[0] == s->stride()[0]);
+ BOOST_CHECK_EQUAL (t->stride()[0], s->stride()[0]);
/* assignment operator */
Image* u = new Image (PIX_FMT_YUV422P, dcp::Size (150, 150), false);
BOOST_CHECK (u->data() != s->data());
BOOST_CHECK (u->data()[0] != s->data()[0]);
BOOST_CHECK (u->line_size() != s->line_size());
- BOOST_CHECK (u->line_size()[0] == s->line_size()[0]);
+ BOOST_CHECK_EQUAL (u->line_size()[0], s->line_size()[0]);
BOOST_CHECK (u->stride() != s->stride());
- BOOST_CHECK (u->stride()[0] == s->stride()[0]);
+ BOOST_CHECK_EQUAL (u->stride()[0], s->stride()[0]);
delete s;
delete t;
BOOST_CHECK (t->data() != s->data());
BOOST_CHECK (t->data()[0] != s->data()[0]);
BOOST_CHECK (t->line_size() != s->line_size());
- BOOST_CHECK (t->line_size()[0] == s->line_size()[0]);
+ BOOST_CHECK_EQUAL (t->line_size()[0], s->line_size()[0]);
BOOST_CHECK (t->stride() != s->stride());
- BOOST_CHECK (t->stride()[0] == s->stride()[0]);
+ BOOST_CHECK_EQUAL (t->stride()[0], s->stride()[0]);
/* assignment operator */
Image* u = new Image (PIX_FMT_YUV422P, dcp::Size (150, 150), true);
BOOST_CHECK (u->data() != s->data());
BOOST_CHECK (u->data()[0] != s->data()[0]);
BOOST_CHECK (u->line_size() != s->line_size());
- BOOST_CHECK (u->line_size()[0] == s->line_size()[0]);
+ BOOST_CHECK_EQUAL (u->line_size()[0], s->line_size()[0]);
BOOST_CHECK (u->stride() != s->stride());
- BOOST_CHECK (u->stride()[0] == s->stride()[0]);
+ BOOST_CHECK_EQUAL (u->stride()[0], s->stride()[0]);
delete s;
delete t;
film->examine_and_add_content (A);
wait_for_jobs ();
- BOOST_CHECK_EQUAL (A->video_length(), 16);
+ BOOST_CHECK_EQUAL (A->video_length().frames (24), 16);
shared_ptr<FFmpegContent> B (new FFmpegContent (film, "test/data/red_30.mp4"));
film->examine_and_add_content (B);
wait_for_jobs ();
- BOOST_CHECK_EQUAL (B->video_length(), 16);
+ BOOST_CHECK_EQUAL (B->video_length().frames (30), 16);
/* Film should have been set to 25fps */
BOOST_CHECK_EQUAL (film->video_frame_rate(), 25);
- BOOST_CHECK_EQUAL (A->position(), 0);
+ BOOST_CHECK_EQUAL (A->position(), DCPTime ());
/* A is 16 frames long at 25 fps */
BOOST_CHECK_EQUAL (B->position(), DCPTime::from_frames (16, 25));
shared_ptr<const dcp::ReelSoundAsset> sound_asset = check.cpls().front()->reels().front()->main_sound ();
BOOST_CHECK (sound_asset);
- BOOST_CHECK (sound_asset->mxf()->channels () == channels);
+ BOOST_CHECK_EQUAL (sound_asset->mxf()->channels (), channels);
/* Sample index in the DCP */
int n = 0;
vector<SubRipSubtitle>::const_iterator i = s._subtitles.begin();
BOOST_CHECK (i != s._subtitles.end ());
- BOOST_CHECK_EQUAL (i->from, DCPTime::from_seconds ((1 * 60) + 49.200));
- BOOST_CHECK_EQUAL (i->to, DCPTime::from_seconds ((1 * 60) + 52.351));
+ BOOST_CHECK_EQUAL (i->from, ContentTime::from_seconds ((1 * 60) + 49.200));
+ BOOST_CHECK_EQUAL (i->to, ContentTime::from_seconds ((1 * 60) + 52.351));
BOOST_CHECK_EQUAL (i->pieces.size(), 1);
BOOST_CHECK_EQUAL (i->pieces.front().text, "This is a subtitle, and it goes over two lines.");
++i;
BOOST_CHECK (i != s._subtitles.end ());
- BOOST_CHECK_EQUAL (i->from, DCPTime::from_seconds ((1 * 60) + 52.440));
- BOOST_CHECK_EQUAL (i->to, DCPTime::from_seconds ((1 * 60) + 54.351));
+ BOOST_CHECK_EQUAL (i->from, ContentTime::from_seconds ((1 * 60) + 52.440));
+ BOOST_CHECK_EQUAL (i->to, ContentTime::from_seconds ((1 * 60) + 54.351));
BOOST_CHECK_EQUAL (i->pieces.size(), 1);
BOOST_CHECK_EQUAL (i->pieces.front().text, "We have emboldened this");
BOOST_CHECK_EQUAL (i->pieces.front().bold, true);
++i;
BOOST_CHECK (i != s._subtitles.end ());
- BOOST_CHECK_EQUAL (i->from, DCPTime::from_seconds ((1 * 60) + 54.440));
- BOOST_CHECK_EQUAL (i->to, DCPTime::from_seconds ((1 * 60) + 56.590));
+ BOOST_CHECK_EQUAL (i->from, ContentTime::from_seconds ((1 * 60) + 54.440));
+ BOOST_CHECK_EQUAL (i->to, ContentTime::from_seconds ((1 * 60) + 56.590));
BOOST_CHECK_EQUAL (i->pieces.size(), 1);
BOOST_CHECK_EQUAL (i->pieces.front().text, "And italicised this.");
BOOST_CHECK_EQUAL (i->pieces.front().italic, true);
++i;
BOOST_CHECK (i != s._subtitles.end ());
- BOOST_CHECK_EQUAL (i->from, DCPTime::from_seconds ((1 * 60) + 56.680));
- BOOST_CHECK_EQUAL (i->to, DCPTime::from_seconds ((1 * 60) + 58.955));
+ BOOST_CHECK_EQUAL (i->from, ContentTime::from_seconds ((1 * 60) + 56.680));
+ BOOST_CHECK_EQUAL (i->to, ContentTime::from_seconds ((1 * 60) + 58.955));
BOOST_CHECK_EQUAL (i->pieces.size(), 1);
BOOST_CHECK_EQUAL (i->pieces.front().text, "Shall I compare thee to a summers' day?");
++i;
BOOST_CHECK (i != s._subtitles.end ());
- BOOST_CHECK_EQUAL (i->from, DCPTime::from_seconds ((2 * 60) + 0.840));
- BOOST_CHECK_EQUAL (i->to, DCPTime::from_seconds ((2 * 60) + 3.400));
+ BOOST_CHECK_EQUAL (i->from, ContentTime::from_seconds ((2 * 60) + 0.840));
+ BOOST_CHECK_EQUAL (i->to, ContentTime::from_seconds ((2 * 60) + 3.400));
BOOST_CHECK_EQUAL (i->pieces.size(), 1);
BOOST_CHECK_EQUAL (i->pieces.front().text, "Is this a dagger I see before me?");
++i;
BOOST_CHECK (i != s._subtitles.end ());
- BOOST_CHECK_EQUAL (i->from, DCPTime::from_seconds ((3 * 60) + 54.560));
- BOOST_CHECK_EQUAL (i->to, DCPTime::from_seconds ((3 * 60) + 56.471));
+ BOOST_CHECK_EQUAL (i->from, ContentTime::from_seconds ((3 * 60) + 54.560));
+ BOOST_CHECK_EQUAL (i->to, ContentTime::from_seconds ((3 * 60) + 56.471));
BOOST_CHECK_EQUAL (i->pieces.size(), 1);
BOOST_CHECK_EQUAL (i->pieces.front().text, "Hello world.");
/*
- Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
/* Straightforward test of DCPTime::round_up */
BOOST_AUTO_TEST_CASE (dcptime_round_up_test)
{
- BOOST_CHECK_EQUAL (DCPTime (0).round_up (DCPTime::HZ / 2), 0);
- BOOST_CHECK_EQUAL (DCPTime (1).round_up (DCPTime::HZ / 2), 2);
- BOOST_CHECK_EQUAL (DCPTime (2).round_up (DCPTime::HZ / 2), 2);
- BOOST_CHECK_EQUAL (DCPTime (3).round_up (DCPTime::HZ / 2), 4);
+ BOOST_CHECK_EQUAL (DCPTime (0).round_up (DCPTime::HZ / 2), DCPTime (0));
+ BOOST_CHECK_EQUAL (DCPTime (1).round_up (DCPTime::HZ / 2), DCPTime (2));
+ BOOST_CHECK_EQUAL (DCPTime (2).round_up (DCPTime::HZ / 2), DCPTime (2));
+ BOOST_CHECK_EQUAL (DCPTime (3).round_up (DCPTime::HZ / 2), DCPTime (4));
- BOOST_CHECK_EQUAL (DCPTime (0).round_up (DCPTime::HZ / 42), 0);
- BOOST_CHECK_EQUAL (DCPTime (1).round_up (DCPTime::HZ / 42), 42);
- BOOST_CHECK_EQUAL (DCPTime (42).round_up (DCPTime::HZ / 42), 42);
- BOOST_CHECK_EQUAL (DCPTime (43).round_up (DCPTime::HZ / 42), 84);
+ BOOST_CHECK_EQUAL (DCPTime (0).round_up (DCPTime::HZ / 42), DCPTime (0));
+ BOOST_CHECK_EQUAL (DCPTime (1).round_up (DCPTime::HZ / 42), DCPTime (42));
+ BOOST_CHECK_EQUAL (DCPTime (42).round_up (DCPTime::HZ / 42), DCPTime (42));
+ BOOST_CHECK_EQUAL (DCPTime (43).round_up (DCPTime::HZ / 42), DCPTime (84));
}