if the data that was emitted from the decoder was not taken by the player.
This means that when the decoder moves into its end trim the position will
stay where it is (since the player does not take the data).
I can't see the point of doing this; the only use of Decoder::position()
is to decide what to pass() next (I think).
It is also inconvenient because we would like to check Decoder::position()
to decide whether to stop passing a decoder once it has reached its end trim
(failing to do this causes #1154).
/*
- Copyright (C) 2012-2017 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
data = ro;
}
- _positions[stream] += Data(stream, ContentAudio (data, _positions[stream])).get_value_or(0);
+ Data(stream, ContentAudio (data, _positions[stream]));
+ _positions[stream] += data->frames();
}
/** @return Time just after the last thing that was emitted from a given stream */
/*
- Copyright (C) 2012-2017 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
ContentTime stream_position (AudioStreamPtr stream) const;
/** @return Number of frames of data that were accepted */
- boost::signals2::signal<Frame (AudioStreamPtr, ContentAudio)> Data;
+ boost::signals2::signal<void (AudioStreamPtr, ContentAudio)> Data;
private:
void silence (int milliseconds);
return merge (subtitles);
}
-bool
+void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
shared_ptr<Piece> piece = wp.lock ();
if (!piece) {
- return false;
+ return;
}
FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
if (frc.skip && (video.frame % 2) == 1) {
- return false;
+ return;
}
/* Time of the first frame we will emit */
time < piece->content->position() ||
time >= piece->content->end() ||
(_last_video_time && time < *_last_video_time)) {
- return false;
+ return;
}
/* Fill gaps that we discover now that we have some video which needs to be emitted */
emit_video (_last_video[wp], t);
t += one_video_frame ();
}
-
- return true;
}
-/** @return Number of input frames that were `accepted'. This is the number of frames passed in
- * unless some were discarded at the end of the block.
- */
-Frame
+void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
shared_ptr<Piece> piece = wp.lock ();
if (!piece) {
- return 0;
+ return;
}
shared_ptr<AudioContent> content = piece->content->audio;
/* And the end of this block in the DCP */
DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
- /* We consider frames trimmed off the beginning to nevertheless be `accepted'; it's only frames trimmed
- off the end that are considered as discarded. This logic is necessary to ensure correct reel lengths,
- although the precise details escape me at the moment.
- */
- Frame accepted = content_audio.audio->frames();
-
/* Remove anything that comes before the start or after the end of the content */
if (time < piece->content->position()) {
pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
if (!cut.first) {
/* This audio is entirely discarded */
- return accepted;
+ return;
}
content_audio.audio = cut.first;
time = cut.second;
} else if (time > piece->content->end()) {
/* Discard it all */
- return 0;
+ return;
} else if (end > piece->content->end()) {
Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
if (remaining_frames == 0) {
- return 0;
+ return;
}
shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
content_audio.audio = cut;
- accepted = content_audio.audio->frames();
}
DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
_audio_merger.push (content_audio.audio, time);
DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
- return accepted;
}
void
ContentTime dcp_to_content_time (boost::shared_ptr<const Piece> piece, DCPTime t) const;
DCPTime content_time_to_dcp (boost::shared_ptr<const Piece> piece, ContentTime t) const;
boost::shared_ptr<PlayerVideo> black_player_video_frame () const;
- bool video (boost::weak_ptr<Piece>, ContentVideo);
- Frame audio (boost::weak_ptr<Piece>, AudioStreamPtr, ContentAudio);
+ void video (boost::weak_ptr<Piece>, ContentVideo);
+ void audio (boost::weak_ptr<Piece>, AudioStreamPtr, ContentAudio);
void image_subtitle_start (boost::weak_ptr<Piece>, ContentImageSubtitle);
void text_subtitle_start (boost::weak_ptr<Piece>, ContentTextSubtitle);
void subtitle_stop (boost::weak_ptr<Piece>, ContentTime);
/*
- Copyright (C) 2012-2016 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
return;
}
- optional<bool> taken;
-
switch (_content->video->frame_type ()) {
case VIDEO_FRAME_TYPE_2D:
- taken = Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
+ Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
break;
case VIDEO_FRAME_TYPE_3D:
{
frame this one is.
*/
bool const same = (_last_emitted && _last_emitted.get() == frame);
- taken = Data (ContentVideo (image, frame, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
+ Data (ContentVideo (image, frame, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
_last_emitted = frame;
break;
}
case VIDEO_FRAME_TYPE_3D_ALTERNATE:
- taken = Data (ContentVideo (image, frame / 2, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
+ Data (ContentVideo (image, frame / 2, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
frame /= 2;
break;
case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
- taken = Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
- taken = Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
+ Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
+ Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
break;
case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
- taken = Data (ContentVideo (image, frame, EYES_LEFT, PART_TOP_HALF));
- taken = Data (ContentVideo (image, frame, EYES_RIGHT, PART_BOTTOM_HALF));
+ Data (ContentVideo (image, frame, EYES_LEFT, PART_TOP_HALF));
+ Data (ContentVideo (image, frame, EYES_RIGHT, PART_BOTTOM_HALF));
break;
case VIDEO_FRAME_TYPE_3D_LEFT:
- taken = Data (ContentVideo (image, frame, EYES_LEFT, PART_WHOLE));
+ Data (ContentVideo (image, frame, EYES_LEFT, PART_WHOLE));
break;
case VIDEO_FRAME_TYPE_3D_RIGHT:
- taken = Data (ContentVideo (image, frame, EYES_RIGHT, PART_WHOLE));
+ Data (ContentVideo (image, frame, EYES_RIGHT, PART_WHOLE));
break;
default:
DCPOMATIC_ASSERT (false);
}
- if (taken.get_value_or(false)) {
- _position = ContentTime::from_frames (frame, _content->active_video_frame_rate ());
- }
+ _position = ContentTime::from_frames (frame, _content->active_video_frame_rate ());
}
void
/*
- Copyright (C) 2012-2016 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
void emit (boost::shared_ptr<const ImageProxy>, Frame frame);
/** @return true if the emitted data was accepted, false if not */
- boost::signals2::signal<bool (ContentVideo)> Data;
+ boost::signals2::signal<void (ContentVideo)> Data;
private:
boost::shared_ptr<const Content> _content;