Both rounding variants are necessary: using floor() where round() was needed caused #648, so conversions to frames must now state their rounding direction explicitly (frames_round vs frames_floor).
shared_ptr<Player> player (new Player (_film, _playlist));
player->set_ignore_video ();
- int64_t const len = _playlist->length().frames (_film->audio_frame_rate());
+ int64_t const len = _playlist->length().frames_round (_film->audio_frame_rate());
_samples_per_point = max (int64_t (1), len / _num_points);
_current.resize (_film->audio_channels ());
if (_seek_reference) {
/* We've had an accurate seek and now we're seeing some data */
ContentTime const delta = time - _seek_reference.get ();
- Frame const delta_frames = delta.frames (frame_rate);
+ Frame const delta_frames = delta.frames_round (frame_rate);
if (delta_frames > 0) {
/* This data comes after the seek time. Pad the data with some silence. */
shared_ptr<AudioBuffers> padded (new AudioBuffers (data->channels(), data->frames() + delta_frames));
}
if (!_position) {
- _position = time.frames (frame_rate);
+ _position = time.frames_round (frame_rate);
}
DCPOMATIC_ASSERT (_position.get() >= (_decoded.frame + _decoded.audio->frames()));
}
double const vfr = _dcp_content->video_frame_rate ();
- int64_t const frame = _next.frames (vfr);
+ int64_t const frame = _next.frames_round (vfr);
if ((*_reel)->main_picture ()) {
shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
_next += ContentTime::from_frames (1, vfr);
if ((*_reel)->main_picture ()) {
- if (_next.frames (vfr) >= (*_reel)->main_picture()->duration()) {
+ if (_next.frames_round (vfr) >= (*_reel)->main_picture()->duration()) {
++_reel;
}
}
}
template <typename T>
- int64_t frames (T r) const {
+ int64_t frames_round (T r) const {
+ /* Number of frames at rate r, rounded to nearest.  Cast r to double
+    before dividing: if T is an integer type (several call sites pass
+    integer audio/video rates) the expression _t * r / HZ would be
+    evaluated in integer arithmetic and truncate before llrint() ever
+    runs, making "round" silently behave like floor — the very bug
+    (#648) this change is meant to fix.
+ */
+ return llrint (_t * double (r) / HZ);
+ }
+
+ template <typename T>
+ int64_t frames_floor (T r) const {
+ /* Number of frames at rate r, rounded down.
+    NOTE(review): with an integer T this truncates toward zero rather
+    than flooring; the two agree for non-negative times — confirm _t
+    cannot be negative when this overload is used with integer rates.
+ */
return floor (_t * r / HZ);
}
/* Do this calculation with frames so that we can round
to a frame boundary at the start rather than the end.
*/
- int64_t ff = frames (r);
+ int64_t ff = frames_round (r);
h = ff / (3600 * r);
ff -= h * 3600 * r;
if (_need_video_length) {
_video_length = frame_time (
_format_context->streams[_video_stream]
- ).get_value_or (ContentTime ()).frames (video_frame_rate().get ());
+ ).get_value_or (ContentTime ()).frames_round (video_frame_rate().get ());
}
}
}
ImageDecoder::seek (ContentTime time, bool accurate)
{
VideoDecoder::seek (time, accurate);
- _video_position = time.frames (_image_content->video_frame_rate ());
+ _video_position = time.frames_round (_image_content->video_frame_rate ());
}
setup_pieces ();
}
- Frame const length_frames = length.frames (_film->audio_frame_rate ());
+ Frame const length_frames = length.frames_round (_film->audio_frame_rate ());
shared_ptr<AudioBuffers> audio (new AudioBuffers (_film->audio_channels(), length_frames));
audio->make_silent ();
the stuff we get back.
*/
offset = -request;
- request_frames += request.frames (_film->audio_frame_rate ());
+ request_frames += request.frames_round (_film->audio_frame_rate ());
if (request_frames < 0) {
request_frames = 0;
}
audio->accumulate_frames (
all.audio.get(),
content_frame - all.frame,
- offset.frames (_film->audio_frame_rate()),
+ offset.frames_round (_film->audio_frame_rate()),
min (Frame (all.audio->frames()), request_frames)
);
}
shared_ptr<const VideoContent> vc = dynamic_pointer_cast<const VideoContent> (piece->content);
DCPTime s = t - piece->content->position ();
s = min (piece->content->length_after_trim(), s);
- return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start ()).frames (vc->video_frame_rate ());
+ /* We're returning a frame index here so we need to floor() the conversion since we want to know the frame
+ that contains t, I think
+ */
+ return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start ()).frames_floor (vc->video_frame_rate ());
}
DCPTime
{
DCPTime s = t - piece->content->position ();
s = min (piece->content->length_after_trim(), s);
- return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames (_film->audio_frame_rate ());
+ /* See notes in dcp_to_content_video */
+ return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}
ContentTime
{
AudioDecoder::seek (t, accurate);
- _done = t.frames (_info.samplerate);
+ _done = t.frames_round (_info.samplerate);
_remaining = _info.frames - _done;
}
}
/* Compute approximate proposed length here, as it's only here that we need it */
- return (_film->length().frames (_film->video_frame_rate ()) - t->video_frames_out()) / fps;
+ return (_film->length().frames_round (_film->video_frame_rate ()) - t->video_frames_out()) / fps;
}
boost::optional<Frame> to;
if (_decoded_video.empty() && _last_seek_time && _last_seek_accurate) {
- from = _last_seek_time->frames (_video_content->video_frame_rate ());
+ from = _last_seek_time->frames_round (_video_content->video_frame_rate ());
to = to_push.front().frame;
} else if (!_decoded_video.empty ()) {
from = _decoded_video.back().frame + 1;
shared_ptr<Job> job = _job.lock ();
DCPOMATIC_ASSERT (job);
- int64_t total = _film->length().frames (_film->video_frame_rate ());
+ int64_t total = _film->length().frames_round (_film->video_frame_rate ());
if (_film->three_d ()) {
/* _full_written and so on are incremented for each eye, so we need to double the total
frames to get the correct progress.
shared_ptr<ImageContent> ic = dynamic_pointer_cast<ImageContent> (*i);
if (ic && ic->still ()) {
int const vfr = _parent->film()->video_frame_rate ();
- ic->set_video_length (_full_length->get (vfr).frames (vfr));
+ ic->set_video_length (_full_length->get (vfr).frames_round (vfr));
}
}
}
VideoContentList vc = _parent->selected_video ();
for (VideoContentList::const_iterator i = vc.begin(); i != vc.end(); ++i) {
int const vfr = _parent->film()->video_frame_rate ();
- (*i)->set_fade_in (_fade_in->get (vfr).frames (vfr));
+ (*i)->set_fade_in (_fade_in->get (vfr).frames_round (vfr));
}
}
VideoContentList vc = _parent->selected_video ();
for (VideoContentList::const_iterator i = vc.begin(); i != vc.end(); ++i) {
int const vfr = _parent->film()->video_frame_rate ();
- (*i)->set_fade_out (_fade_out->get (vfr).frames (vfr));
+ (*i)->set_fade_out (_fade_out->get (vfr).frames_round (vfr));
}
}
void seek (ContentTime t, bool accurate)
{
AudioDecoder::seek (t, accurate);
- _position = t.frames (_test_audio_content->resampled_audio_frame_rate ());
+ _position = t.frames_round (_test_audio_content->resampled_audio_frame_rate ());
}
private:
for (int64_t i = 0; i < 62000; i += 2000) {
DCPTime d (i);
ContentTime c (d, frc);
- std::cout << i << " " << d << " " << c << " " << c.frames (24.0) << " " << j << "\n";
- BOOST_CHECK_EQUAL (c.frames (24.0), j);
+ BOOST_CHECK_EQUAL (c.frames_floor (24.0), j);
++k;
if (k == 2) {
++j;
video_delay = ContentTime ();
}
- Frame const first_frame = video_delay.round_up (content->video_frame_rate ()).frames (content->video_frame_rate ());
+ Frame const first_frame = video_delay.round_up (content->video_frame_rate ()).frames_round (content->video_frame_rate ());
FFmpegDecoder decoder (content, film->log());
list<ContentVideo> a = decoder.get_video (first_frame, true);