summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1: e7811b4)
The "accumulation" of, for example, video data when we are looking
for audio data is an *optimisation* to reduce the number of seeks.
It should not be necessary for correctness (the output should be right
even if we never kept anything except what we were looking for).
Doing this accumulation is not always an optimisation; sometimes it is
better not to do it. Going back for subtitles is one such case, so we
avoid the accumulation there.
17 files changed:
/* Keep stuffing data into _decoded until we have enough data, or the subclass does not want to give us any more */
while (
(_decoded.frame > frame || (_decoded.frame + _decoded.audio->frames()) < end) &&
/* Keep stuffing data into _decoded until we have enough data, or the subclass does not want to give us any more */
while (
(_decoded.frame > frame || (_decoded.frame + _decoded.audio->frames()) < end) &&
+ !_decoder->pass (Decoder::PASS_REASON_AUDIO)
} else {
while (
_decoded.audio->frames() < length &&
} else {
while (
_decoded.audio->frames() < length &&
+ !_decoder->pass (Decoder::PASS_REASON_AUDIO)
+DCPDecoder::pass (PassReason reason)
{
if (_reel == _reels.end () || !_dcp_content->can_be_played ()) {
return true;
{
if (_reel == _reels.end () || !_dcp_content->can_be_played ()) {
return true;
double const vfr = _dcp_content->video_frame_rate ();
int64_t const frame = _next.frames_round (vfr);
double const vfr = _dcp_content->video_frame_rate ();
int64_t const frame = _next.frames_round (vfr);
- if ((*_reel)->main_picture ()) {
+ if ((*_reel)->main_picture () && reason != PASS_REASON_SUBTITLE) {
shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
shared_ptr<dcp::MonoPictureAsset> mono = dynamic_pointer_cast<dcp::MonoPictureAsset> (asset);
shared_ptr<dcp::StereoPictureAsset> stereo = dynamic_pointer_cast<dcp::StereoPictureAsset> (asset);
shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
shared_ptr<dcp::MonoPictureAsset> mono = dynamic_pointer_cast<dcp::MonoPictureAsset> (asset);
shared_ptr<dcp::StereoPictureAsset> stereo = dynamic_pointer_cast<dcp::StereoPictureAsset> (asset);
- if ((*_reel)->main_sound ()) {
+ if ((*_reel)->main_sound () && reason != PASS_REASON_SUBTITLE) {
int64_t const entry_point = (*_reel)->main_sound()->entry_point ();
shared_ptr<const dcp::SoundFrame> sf = (*_reel)->main_sound()->asset()->get_frame (entry_point + frame);
uint8_t const * from = sf->data ();
int64_t const entry_point = (*_reel)->main_sound()->entry_point ();
shared_ptr<const dcp::SoundFrame> sf = (*_reel)->main_sound()->asset()->get_frame (entry_point + frame);
uint8_t const * from = sf->data ();
private:
friend struct dcp_subtitle_within_dcp_test;
private:
friend struct dcp_subtitle_within_dcp_test;
+ bool pass (PassReason);
void seek (ContentTime t, bool accurate);
std::list<ContentTimePeriod> image_subtitles_during (ContentTimePeriod, bool starting) const;
void seek (ContentTime t, bool accurate);
std::list<ContentTimePeriod> image_subtitles_during (ContentTimePeriod, bool starting) const;
-DCPSubtitleDecoder::pass ()
+DCPSubtitleDecoder::pass (PassReason)
{
if (_next == _subtitles.end ()) {
return true;
{
if (_next == _subtitles.end ()) {
return true;
DCPSubtitleDecoder (boost::shared_ptr<const DCPSubtitleContent>);
protected:
DCPSubtitleDecoder (boost::shared_ptr<const DCPSubtitleContent>);
protected:
+ bool pass (PassReason);
void seek (ContentTime time, bool accurate);
private:
void seek (ContentTime time, bool accurate);
private:
*/
virtual void seek (ContentTime time, bool accurate) = 0;
*/
virtual void seek (ContentTime time, bool accurate) = 0;
- virtual bool pass () = 0;
+ enum PassReason {
+ PASS_REASON_VIDEO,
+ PASS_REASON_AUDIO,
+ PASS_REASON_SUBTITLE
+ };
+
+ virtual bool pass (PassReason) = 0;
+FFmpegDecoder::pass (PassReason reason)
{
int r = av_read_frame (_format_context, &_packet);
{
int r = av_read_frame (_format_context, &_packet);
int const si = _packet.stream_index;
shared_ptr<const FFmpegContent> fc = _ffmpeg_content;
int const si = _packet.stream_index;
shared_ptr<const FFmpegContent> fc = _ffmpeg_content;
- if (si == _video_stream && !_ignore_video) {
+ if (si == _video_stream && !_ignore_video && reason != PASS_REASON_SUBTITLE) {
decode_video_packet ();
} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index (_format_context, si)) {
decode_subtitle_packet ();
decode_video_packet ();
} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index (_format_context, si)) {
decode_subtitle_packet ();
+ } else if (reason != PASS_REASON_SUBTITLE) {
decode_audio_packet ();
}
decode_audio_packet ();
}
private:
friend struct ::ffmpeg_pts_offset_test;
private:
friend struct ::ffmpeg_pts_offset_test;
+ bool pass (PassReason);
void seek (ContentTime time, bool);
void flush ();
void seek (ContentTime time, bool);
void flush ();
+ImageDecoder::pass (PassReason)
{
if (_video_position >= _image_content->video_length()) {
return true;
{
if (_video_position >= _image_content->video_length()) {
return true;
+ bool pass (PassReason);
void seek (ContentTime, bool);
boost::shared_ptr<const ImageContent> _image_content;
void seek (ContentTime, bool);
boost::shared_ptr<const ImageContent> _image_content;
+SndfileDecoder::pass (PassReason)
{
if (_remaining == 0) {
return true;
{
if (_remaining == 0) {
return true;
~SndfileDecoder ();
private:
~SndfileDecoder ();
private:
+ bool pass (PassReason);
void seek (ContentTime, bool);
int64_t _done;
void seek (ContentTime, bool);
int64_t _done;
+SubRipDecoder::pass (PassReason)
{
if (_next >= _subtitles.size ()) {
return true;
{
if (_next >= _subtitles.size ()) {
return true;
protected:
void seek (ContentTime time, bool accurate);
protected:
void seek (ContentTime time, bool accurate);
+ bool pass (PassReason);
private:
std::list<ContentTimePeriod> image_subtitles_during (ContentTimePeriod, bool starting) const;
private:
std::list<ContentTimePeriod> image_subtitles_during (ContentTimePeriod, bool starting) const;
* (a) give us what we want, or
* (b) hit the end of the decoder.
*/
* (a) give us what we want, or
* (b) hit the end of the decoder.
*/
- while (!pass () && (subs.empty() || (subs.back().period().to < sp.back().to))) {}
+ while (!pass(PASS_REASON_SUBTITLE) && (subs.empty() || (subs.back().period().to < sp.back().to))) {}
/* Now look for what we wanted in the data we have collected */
/* XXX: inefficient */
/* Now look for what we wanted in the data we have collected */
/* XXX: inefficient */
+ if (pass (PASS_REASON_VIDEO)) {
/* The decoder has nothing more for us */
break;
}
/* The decoder has nothing more for us */
break;
}
dec = decoded_video (frame);
} else {
/* Any frame will do: use the first one that comes out of pass() */
dec = decoded_video (frame);
} else {
/* Any frame will do: use the first one that comes out of pass() */
- while (_decoded_video.empty() && !pass ()) {}
+ while (_decoded_video.empty() && !pass (PASS_REASON_VIDEO)) {}
if (!_decoded_video.empty ()) {
dec.push_back (_decoded_video.front ());
}
if (!_decoded_video.empty ()) {
dec.push_back (_decoded_video.front ());
}
{
Frame const N = min (
Frame (2000),
{
Frame const N = min (
Frame (2000),