using boost::shared_ptr;
AudioDecoder::AudioDecoder (Decoder* parent, shared_ptr<const AudioContent> content, bool fast, shared_ptr<Log> log)
- : _audio_content (content)
- , _ignore_audio (false)
+ : _ignore (false)
, _fast (fast)
{
BOOST_FOREACH (AudioStreamPtr i, content->streams ()) {
- _streams[i] = shared_ptr<AudioDecoderStream> (new AudioDecoderStream (_audio_content, i, parent, log));
+ _streams[i] = shared_ptr<AudioDecoderStream> (new AudioDecoderStream (content, i, parent, log));
}
}
ContentAudio
-AudioDecoder::get_audio (AudioStreamPtr stream, Frame frame, Frame length, bool accurate)
+AudioDecoder::get (AudioStreamPtr stream, Frame frame, Frame length, bool accurate)
{
return _streams[stream]->get (frame, length, accurate);
}
void
-AudioDecoder::audio (AudioStreamPtr stream, shared_ptr<const AudioBuffers> data, ContentTime time)
+AudioDecoder::give (AudioStreamPtr stream, shared_ptr<const AudioBuffers> data, ContentTime time)
{
- if (_ignore_audio) {
+ if (_ignore) {
return;
}
}
}
-/** Set this player never to produce any audio data */
+/** Set this decoder never to produce any data */
void
-AudioDecoder::set_ignore_audio ()
+AudioDecoder::set_ignore ()
{
- _ignore_audio = true;
+ _ignore = true;
}
* @param accurate true to try hard to return frames from exactly `frame', false if we don't mind nearby frames.
* @return Time-stamped audio data which may or may not be from the location (and of the length) requested.
*/
- ContentAudio get_audio (AudioStreamPtr stream, Frame time, Frame length, bool accurate);
+ ContentAudio get (AudioStreamPtr stream, Frame time, Frame length, bool accurate);
- void set_ignore_audio ();
+ void set_ignore ();
bool fast () const {
return _fast;
}
- void audio (AudioStreamPtr stream, boost::shared_ptr<const AudioBuffers>, ContentTime);
+ void give (AudioStreamPtr stream, boost::shared_ptr<const AudioBuffers>, ContentTime);
void flush ();
void seek (ContentTime t, bool accurate);
private:
- boost::shared_ptr<const AudioContent> _audio_content;
- /** An AudioDecoderStream object to manage each stream in _audio_content */
+ /** An AudioDecoderStream object to manage each stream in the content */
std::map<AudioStreamPtr, boost::shared_ptr<AudioDecoderStream> > _streams;
- bool _ignore_audio;
+ bool _ignore;
bool _fast;
};
shared_ptr<dcp::StereoPictureAsset> stereo = dynamic_pointer_cast<dcp::StereoPictureAsset> (asset);
int64_t const entry_point = (*_reel)->main_picture()->entry_point ();
if (mono) {
- video->video (shared_ptr<ImageProxy> (new J2KImageProxy (mono->get_frame (entry_point + frame), asset->size())), offset + frame);
+ video->give (shared_ptr<ImageProxy> (new J2KImageProxy (mono->get_frame (entry_point + frame), asset->size())), offset + frame);
} else {
- video->video (
+ video->give (
shared_ptr<ImageProxy> (new J2KImageProxy (stereo->get_frame (entry_point + frame), asset->size(), dcp::EYE_LEFT)),
offset + frame
);
- video->video (
+ video->give (
shared_ptr<ImageProxy> (new J2KImageProxy (stereo->get_frame (entry_point + frame), asset->size(), dcp::EYE_RIGHT)),
offset + frame
);
}
}
- audio->audio (_dcp_content->audio->stream(), data, ContentTime::from_frames (offset, vfr) + _next);
+ audio->give (_dcp_content->audio->stream(), data, ContentTime::from_frames (offset, vfr) + _next);
}
if ((*_reel)->main_subtitle ()) {
if (!subs.empty ()) {
/* XXX: assuming that all `subs' are at the same time; maybe this is ok */
- subtitle->text_subtitle (
+ subtitle->give_text (
ContentTimePeriod (
ContentTime::from_frames (offset - entry_point, vfr) + ContentTime::from_seconds (subs.front().in().as_seconds ()),
ContentTime::from_frames (offset - entry_point, vfr) + ContentTime::from_seconds (subs.front().out().as_seconds ())
++_next;
}
- subtitle->text_subtitle (p, s);
+ subtitle->give_text (p, s);
return false;
}
int const si = _packet.stream_index;
shared_ptr<const FFmpegContent> fc = _ffmpeg_content;
- if (_video_stream && si == _video_stream.get() && !video->ignore_video() && (accurate || reason != PASS_REASON_SUBTITLE)) {
+ if (_video_stream && si == _video_stream.get() && !video->ignore() && (accurate || reason != PASS_REASON_SUBTITLE)) {
decode_video_packet ();
} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index (_format_context, si)) {
decode_subtitle_packet ();
}
if (data->frames() > 0) {
- audio->audio (*stream, data, ct);
+ audio->give (*stream, data, ct);
}
}
if (i->second != AV_NOPTS_VALUE) {
double const pts = i->second * av_q2d (_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds ();
- video->video (
+ video->give (
shared_ptr<ImageProxy> (new RawImageProxy (image)),
llrint (pts * _ffmpeg_content->active_video_frame_rate ())
);
static_cast<double> (rect->h) / vs.height
);
- subtitle->image_subtitle (period, image, scaled_rect);
+ subtitle->give_image (period, image, scaled_rect);
}
void
}
}
- subtitle->text_subtitle (period, ss);
+ subtitle->give_text (period, ss);
}
}
}
- video->video (_image, _video_position);
+ video->give (_image, _video_position);
++_video_position;
return false;
}
}
if (decoder->video && _ignore_video) {
- decoder->video->set_ignore_video ();
+ decoder->video->set_ignore ();
}
if (decoder->audio && _ignore_audio) {
- decoder->audio->set_ignore_audio ();
+ decoder->audio->set_ignore ();
}
_pieces.push_back (shared_ptr<Piece> (new Piece (i, decoder, frc.get ())));
if (use) {
/* We want to use this piece */
- list<ContentVideo> content_video = decoder->get_video (dcp_to_content_video (piece, time), accurate);
+ list<ContentVideo> content_video = decoder->get (dcp_to_content_video (piece, time), accurate);
if (content_video.empty ()) {
pvf.push_back (black_player_video_frame (time));
} else {
}
} else {
/* Discard unused video */
- decoder->get_video (dcp_to_content_video (piece, time), accurate);
+ decoder->get (dcp_to_content_video (piece, time), accurate);
}
}
}
}
/* Audio from this piece's decoder stream (which might be more or less than what we asked for) */
- ContentAudio all = decoder->get_audio (j, content_frame, request_frames, accurate);
+ ContentAudio all = decoder->get (j, content_frame, request_frames, accurate);
/* Gain */
if (i->content->audio->gain() != 0) {
/* XXX: this video_frame_rate() should be the rate that the subtitle content has been prepared for */
ContentTime const to = from + ContentTime::from_frames (1, _film->video_frame_rate ());
- list<ContentImageSubtitle> image = subtitle_decoder->get_image_subtitles (ContentTimePeriod (from, to), starting, accurate);
+ list<ContentImageSubtitle> image = subtitle_decoder->get_image (ContentTimePeriod (from, to), starting, accurate);
for (list<ContentImageSubtitle>::iterator i = image.begin(); i != image.end(); ++i) {
/* Apply content's subtitle offsets */
ps.image.push_back (i->sub);
}
- list<ContentTextSubtitle> text = subtitle_decoder->get_text_subtitles (ContentTimePeriod (from, to), starting, accurate);
+ list<ContentTextSubtitle> text = subtitle_decoder->get_text (ContentTimePeriod (from, to), starting, accurate);
BOOST_FOREACH (ContentTextSubtitle& ts, text) {
BOOST_FOREACH (dcp::SubtitleString s, ts.subs) {
s.set_h_position (s.h_position() + (*j)->content->subtitle->x_offset ());
}
data->set_frames (this_time);
- audio->audio (_sndfile_content->audio->stream (), data, ContentTime::from_frames (_done, _info.samplerate));
+ audio->give (_sndfile_content->audio->stream (), data, ContentTime::from_frames (_done, _info.samplerate));
_done += this_time;
_remaining -= this_time;
SubtitleDecoder::SubtitleDecoder (
Decoder* parent,
shared_ptr<const SubtitleContent> c,
- function<list<ContentTimePeriod> (ContentTimePeriod, bool)> image_subtitles_during,
- function<list<ContentTimePeriod> (ContentTimePeriod, bool)> text_subtitles_during
+ function<list<ContentTimePeriod> (ContentTimePeriod, bool)> image_during,
+ function<list<ContentTimePeriod> (ContentTimePeriod, bool)> text_during
)
: _parent (parent)
- , _subtitle_content (c)
- , _image_subtitles_during (image_subtitles_during)
- , _text_subtitles_during (text_subtitles_during)
+ , _content (c)
+ , _image_during (image_during)
+ , _text_during (text_during)
{
}
* of the video frame)
*/
void
-SubtitleDecoder::image_subtitle (ContentTimePeriod period, shared_ptr<Image> image, dcpomatic::Rect<double> rect)
+SubtitleDecoder::give_image (ContentTimePeriod period, shared_ptr<Image> image, dcpomatic::Rect<double> rect)
{
- _decoded_image_subtitles.push_back (ContentImageSubtitle (period, image, rect));
+ _decoded_image.push_back (ContentImageSubtitle (period, image, rect));
}
void
-SubtitleDecoder::text_subtitle (ContentTimePeriod period, list<dcp::SubtitleString> s)
+SubtitleDecoder::give_text (ContentTimePeriod period, list<dcp::SubtitleString> s)
{
- _decoded_text_subtitles.push_back (ContentTextSubtitle (period, s));
+ _decoded_text.push_back (ContentTextSubtitle (period, s));
}
/** @param sp Full periods of subtitles that are showing or starting during the specified period */
- /* Discard anything in _decoded_image_subtitles that is outside 5 seconds either side of period */
+ /* Discard anything in _decoded_image that is outside 5 seconds either side of period */
- list<ContentImageSubtitle>::iterator i = _decoded_image_subtitles.begin();
- while (i != _decoded_image_subtitles.end()) {
+ list<ContentImageSubtitle>::iterator i = _decoded_image.begin();
+ while (i != _decoded_image.end()) {
list<ContentImageSubtitle>::iterator tmp = i;
++tmp;
i->period().to < (period.from - ContentTime::from_seconds (5)) ||
i->period().from > (period.to + ContentTime::from_seconds (5))
) {
- _decoded_image_subtitles.erase (i);
+ _decoded_image.erase (i);
}
i = tmp;
}
list<ContentTextSubtitle>
-SubtitleDecoder::get_text_subtitles (ContentTimePeriod period, bool starting, bool accurate)
+SubtitleDecoder::get_text (ContentTimePeriod period, bool starting, bool accurate)
{
- return get<ContentTextSubtitle> (_decoded_text_subtitles, _text_subtitles_during (period, starting), period, starting, accurate);
+ return get<ContentTextSubtitle> (_decoded_text, _text_during (period, starting), period, starting, accurate);
}
list<ContentImageSubtitle>
-SubtitleDecoder::get_image_subtitles (ContentTimePeriod period, bool starting, bool accurate)
+SubtitleDecoder::get_image (ContentTimePeriod period, bool starting, bool accurate)
{
- return get<ContentImageSubtitle> (_decoded_image_subtitles, _image_subtitles_during (period, starting), period, starting, accurate);
+ return get<ContentImageSubtitle> (_decoded_image, _image_during (period, starting), period, starting, accurate);
}
void
SubtitleDecoder::seek (ContentTime, bool)
{
- _decoded_text_subtitles.clear ();
- _decoded_image_subtitles.clear ();
+ _decoded_text.clear ();
+ _decoded_image.clear ();
}
SubtitleDecoder (
Decoder* parent,
boost::shared_ptr<const SubtitleContent>,
- boost::function<std::list<ContentTimePeriod> (ContentTimePeriod, bool)> image_subtitles_during,
- boost::function<std::list<ContentTimePeriod> (ContentTimePeriod, bool)> text_subtitles_during
+ boost::function<std::list<ContentTimePeriod> (ContentTimePeriod, bool)> image_during,
+ boost::function<std::list<ContentTimePeriod> (ContentTimePeriod, bool)> text_during
);
- std::list<ContentImageSubtitle> get_image_subtitles (ContentTimePeriod period, bool starting, bool accurate);
- std::list<ContentTextSubtitle> get_text_subtitles (ContentTimePeriod period, bool starting, bool accurate);
+ std::list<ContentImageSubtitle> get_image (ContentTimePeriod period, bool starting, bool accurate);
+ std::list<ContentTextSubtitle> get_text (ContentTimePeriod period, bool starting, bool accurate);
void seek (ContentTime, bool);
- void image_subtitle (ContentTimePeriod period, boost::shared_ptr<Image>, dcpomatic::Rect<double>);
- void text_subtitle (ContentTimePeriod period, std::list<dcp::SubtitleString>);
+ void give_image (ContentTimePeriod period, boost::shared_ptr<Image>, dcpomatic::Rect<double>);
+ void give_text (ContentTimePeriod period, std::list<dcp::SubtitleString>);
boost::shared_ptr<const SubtitleContent> content () const {
- return _subtitle_content;
+ return _content;
}
private:
Decoder* _parent;
- std::list<ContentImageSubtitle> _decoded_image_subtitles;
- std::list<ContentTextSubtitle> _decoded_text_subtitles;
- boost::shared_ptr<const SubtitleContent> _subtitle_content;
+ std::list<ContentImageSubtitle> _decoded_image;
+ std::list<ContentTextSubtitle> _decoded_text;
+ boost::shared_ptr<const SubtitleContent> _content;
template <class T>
std::list<T> get (std::list<T> const & subs, std::list<ContentTimePeriod> const & sp, ContentTimePeriod period, bool starting, bool accurate);
- boost::function<std::list<ContentTimePeriod> (ContentTimePeriod, bool)> _image_subtitles_during;
- boost::function<std::list<ContentTimePeriod> (ContentTimePeriod, bool)> _text_subtitles_during;
+ boost::function<std::list<ContentTimePeriod> (ContentTimePeriod, bool)> _image_during;
+ boost::function<std::list<ContentTimePeriod> (ContentTimePeriod, bool)> _text_during;
};
#endif
}
}
- subtitle->text_subtitle (content_time_period (_subtitles[_next]), out);
+ subtitle->give_text (content_time_period (_subtitles[_next]), out);
++_next;
return false;
#include "film.h"
#include "log.h"
#include "compose.hpp"
+#include <boost/foreach.hpp>
#include <iostream>
#include "i18n.h"
#ifdef DCPOMATIC_DEBUG
: test_gaps (0)
, _parent (parent),
- _video_content (c)
+ _content (c)
#else
: _parent (parent)
- , _video_content (c)
+ , _content (c)
#endif
, _log (log)
, _last_seek_accurate (true)
- , _ignore_video (false)
+ , _ignore (false)
{
- _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_content->video->size(), true));
+ _black_image.reset (new Image (AV_PIX_FMT_RGB24, _content->video->size(), true));
_black_image->make_black ();
}
list<ContentVideo>
-VideoDecoder::decoded_video (Frame frame)
+VideoDecoder::decoded (Frame frame)
{
list<ContentVideo> output;
- for (list<ContentVideo>::const_iterator i = _decoded_video.begin(); i != _decoded_video.end(); ++i) {
- if (i->frame == frame) {
- output.push_back (*i);
+ BOOST_FOREACH (ContentVideo const & i, _decoded) {
+ if (i.frame == frame) {
+ output.push_back (i);
}
}
* @return Frames; there may be none (if there is no video there), 1 for 2D or 2 for 3D.
*/
list<ContentVideo>
-VideoDecoder::get_video (Frame frame, bool accurate)
+VideoDecoder::get (Frame frame, bool accurate)
{
if (_no_data_frame && frame >= _no_data_frame.get()) {
return list<ContentVideo> ();
}
- /* At this stage, if we have get_video()ed before, _decoded_video will contain the last frame that this
- method returned (and possibly a few more). If the requested frame is not in _decoded_video and it is not the next
- one after the end of _decoded_video we need to seek.
+ /* At this stage, if we have get()ed before, _decoded will contain the last frame that this
+ method returned (and possibly a few more). If the requested frame is not in _decoded and it is not the next
+ one after the end of _decoded we need to seek.
*/
_log->log (String::compose ("VD has request for %1", frame), LogEntry::TYPE_DEBUG_DECODE);
- if (_decoded_video.empty() || frame < _decoded_video.front().frame || frame > (_decoded_video.back().frame + 1)) {
- seek (ContentTime::from_frames (frame, _video_content->active_video_frame_rate()), accurate);
+ if (_decoded.empty() || frame < _decoded.front().frame || frame > (_decoded.back().frame + 1)) {
+ seek (ContentTime::from_frames (frame, _content->active_video_frame_rate()), accurate);
}
list<ContentVideo> dec;
bool no_data = false;
while (true) {
- if (!decoded_video(frame).empty ()) {
+ if (!decoded(frame).empty ()) {
/* We got what we want */
break;
}
break;
}
- if (!_decoded_video.empty() && _decoded_video.front().frame > frame) {
+ if (!_decoded.empty() && _decoded.front().frame > frame) {
/* We're never going to get the frame we want. Perhaps the caller is asking
* for a video frame before the content's video starts (if its audio
* begins before its video, for example).
}
}
- dec = decoded_video (frame);
+ dec = decoded (frame);
if (no_data && dec.empty()) {
_no_data_frame = frame;
} else {
/* Any frame will do: use the first one that comes out of pass() */
- while (_decoded_video.empty() && !_parent->pass (Decoder::PASS_REASON_VIDEO, accurate)) {}
- if (!_decoded_video.empty ()) {
- dec.push_back (_decoded_video.front ());
+ while (_decoded.empty() && !_parent->pass (Decoder::PASS_REASON_VIDEO, accurate)) {}
+ if (!_decoded.empty ()) {
+ dec.push_back (_decoded.front ());
}
}
- /* Clean up _decoded_video; keep the frame we are returning, if any (which may have two images
+ /* Clean up _decoded; keep the frame we are returning, if any (which may have two images
for 3D), but nothing before that */
- while (!_decoded_video.empty() && !dec.empty() && _decoded_video.front().frame < dec.front().frame) {
- _decoded_video.pop_front ();
+ while (!_decoded.empty() && !dec.empty() && _decoded.front().frame < dec.front().frame) {
+ _decoded.pop_front ();
}
return dec;
}
-/** Fill _decoded_video from `from' up to, but not including, `to' with
+/** Fill _decoded from `from' up to, but not including, `to' with
* a frame for one particular Eyes value (which could be EYES_BOTH,
* EYES_LEFT or EYES_RIGHT)
*/
Part filler_part = PART_WHOLE;
/* ...unless there's some video we can fill with */
- if (!_decoded_video.empty ()) {
- filler_image = _decoded_video.back().image;
- filler_part = _decoded_video.back().part;
+ if (!_decoded.empty ()) {
+ filler_image = _decoded.back().image;
+ filler_part = _decoded.back().part;
}
for (Frame i = from; i < to; ++i) {
#ifdef DCPOMATIC_DEBUG
test_gaps++;
#endif
- _decoded_video.push_back (
+ _decoded.push_back (
ContentVideo (filler_image, eye, filler_part, i)
);
}
}
-/** Fill _decoded_video from `from' up to, but not including, `to'
+/** Fill _decoded from `from' up to, but not including, `to'
* adding both left and right eye frames.
*/
void
Part filler_right_part = PART_WHOLE;
/* ...unless there's some video we can fill with */
- for (list<ContentVideo>::const_reverse_iterator i = _decoded_video.rbegin(); i != _decoded_video.rend(); ++i) {
+ for (list<ContentVideo>::const_reverse_iterator i = _decoded.rbegin(); i != _decoded.rend(); ++i) {
if (i->eyes == EYES_LEFT && !filler_left_image) {
filler_left_image = i->image;
filler_left_part = i->part;
}
Frame filler_frame = from;
- Eyes filler_eye = _decoded_video.empty() ? EYES_LEFT : _decoded_video.back().eyes;
+ Eyes filler_eye = _decoded.empty() ? EYES_LEFT : _decoded.back().eyes;
- if (_decoded_video.empty ()) {
+ if (_decoded.empty ()) {
filler_frame = 0;
filler_eye = EYES_LEFT;
- } else if (_decoded_video.back().eyes == EYES_LEFT) {
- filler_frame = _decoded_video.back().frame;
+ } else if (_decoded.back().eyes == EYES_LEFT) {
+ filler_frame = _decoded.back().frame;
filler_eye = EYES_RIGHT;
- } else if (_decoded_video.back().eyes == EYES_RIGHT) {
- filler_frame = _decoded_video.back().frame + 1;
+ } else if (_decoded.back().eyes == EYES_RIGHT) {
+ filler_frame = _decoded.back().frame + 1;
filler_eye = EYES_LEFT;
}
test_gaps++;
#endif
- _decoded_video.push_back (
+ _decoded.push_back (
ContentVideo (
filler_eye == EYES_LEFT ? filler_left_image : filler_right_image,
filler_eye,
}
}
-/** Called by subclasses when they have a video frame ready */
+/** Called by decoder classes when they have a video frame ready */
void
-VideoDecoder::video (shared_ptr<const ImageProxy> image, Frame frame)
+VideoDecoder::give (shared_ptr<const ImageProxy> image, Frame frame)
{
- if (_ignore_video) {
+ if (_ignore) {
return;
}
_log->log (String::compose ("VD receives %1", frame), LogEntry::TYPE_DEBUG_DECODE);
- /* Work out what we are going to push into _decoded_video next */
+ /* Work out what we are going to push into _decoded next */
list<ContentVideo> to_push;
- switch (_video_content->video->frame_type ()) {
+ switch (_content->video->frame_type ()) {
case VIDEO_FRAME_TYPE_2D:
to_push.push_back (ContentVideo (image, EYES_BOTH, PART_WHOLE, frame));
break;
/* We receive the same frame index twice for 3D-alternate; hence we know which
frame this one is.
*/
- bool const same = (!_decoded_video.empty() && frame == _decoded_video.back().frame);
+ bool const same = (!_decoded.empty() && frame == _decoded.back().frame);
to_push.push_back (ContentVideo (image, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE, frame));
break;
}
}
/* Now VideoDecoder is required never to have gaps in the frames that it presents
- via get_video(). Hence we need to fill in any gap between the last thing in _decoded_video
+ via get(). Hence we need to fill in any gap between the last thing in _decoded
and the things we are about to push.
*/
optional<Frame> from;
optional<Frame> to;
- if (_decoded_video.empty() && _last_seek_time && _last_seek_accurate) {
- from = _last_seek_time->frames_round (_video_content->active_video_frame_rate ());
+ if (_decoded.empty() && _last_seek_time && _last_seek_accurate) {
+ from = _last_seek_time->frames_round (_content->active_video_frame_rate ());
to = to_push.front().frame;
- } else if (!_decoded_video.empty ()) {
- from = _decoded_video.back().frame + 1;
+ } else if (!_decoded.empty ()) {
+ from = _decoded.back().frame + 1;
to = to_push.front().frame;
}
}
if (from) {
- switch (_video_content->video->frame_type ()) {
+ switch (_content->video->frame_type ()) {
case VIDEO_FRAME_TYPE_2D:
fill_one_eye (from.get(), to.get (), EYES_BOTH);
break;
}
}
- copy (to_push.begin(), to_push.end(), back_inserter (_decoded_video));
+ copy (to_push.begin(), to_push.end(), back_inserter (_decoded));
/* We can't let this build up too much or we will run out of memory. There is a
- `best' value for the allowed size of _decoded_video which balances memory use
+ `best' value for the allowed size of _decoded which balances memory use
with decoding efficiency (lack of seeks). Throwing away video frames here
is not a problem for correctness, so do it.
*/
- while (_decoded_video.size() > 96) {
- _decoded_video.pop_back ();
+ while (_decoded.size() > 96) {
+ _decoded.pop_back ();
}
}
void
VideoDecoder::seek (ContentTime s, bool accurate)
{
- _decoded_video.clear ();
+ _decoded.clear ();
_last_seek_time = s;
_last_seek_accurate = accurate;
}
-/** Set this player never to produce any video data */
+/** Set this decoder never to produce any data */
void
-VideoDecoder::set_ignore_video ()
+VideoDecoder::set_ignore ()
{
- _ignore_video = true;
+ _ignore = true;
}
public:
VideoDecoder (Decoder* parent, boost::shared_ptr<const Content> c, boost::shared_ptr<Log> log);
- std::list<ContentVideo> get_video (Frame frame, bool accurate);
+ std::list<ContentVideo> get (Frame frame, bool accurate);
- void set_ignore_video ();
- bool ignore_video () const {
- return _ignore_video;
+ void set_ignore ();
+ bool ignore () const {
+ return _ignore;
}
#ifdef DCPOMATIC_DEBUG
friend void ffmpeg_decoder_sequential_test_one (boost::filesystem::path file, float fps, int gaps, int video_length);
void seek (ContentTime time, bool accurate);
- void video (boost::shared_ptr<const ImageProxy>, Frame frame);
+ void give (boost::shared_ptr<const ImageProxy>, Frame frame);
private:
- std::list<ContentVideo> decoded_video (Frame frame);
+ std::list<ContentVideo> decoded (Frame frame);
void fill_one_eye (Frame from, Frame to, Eyes);
void fill_both_eyes (Frame from, Frame to, Eyes);
Decoder* _parent;
- boost::shared_ptr<const Content> _video_content;
+ boost::shared_ptr<const Content> _content;
boost::shared_ptr<Log> _log;
- std::list<ContentVideo> _decoded_video;
+ std::list<ContentVideo> _decoded;
boost::shared_ptr<Image> _black_image;
boost::optional<ContentTime> _last_seek_time;
bool _last_seek_accurate;
/** true if this decoder should ignore all video; i.e. never produce any */
- bool _ignore_video;
+ bool _ignore;
/** if set, this is a frame for which we got no data because the Decoder said
* it has no more to give.
*/
sizer->Add (buttons, wxSizerFlags().Expand().DoubleBorder());
}
- list<ContentTextSubtitle> subs = decoder->subtitle->get_text_subtitles (ContentTimePeriod (ContentTime(), ContentTime::max ()), true, true);
+ list<ContentTextSubtitle> subs = decoder->subtitle->get_text (ContentTimePeriod (ContentTime(), ContentTime::max ()), true, true);
FrameRateChange const frc = film->active_frame_rate_change (position);
int n = 0;
for (list<ContentTextSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
}
}
- audio->audio (_test_audio_content->audio->stream(), buffers, ContentTime::from_frames (_position, 48000));
+ audio->give (_test_audio_content->audio->stream(), buffers, ContentTime::from_frames (_position, 48000));
_position += N;
return N < 2000;
get (Frame from, Frame length)
{
decoder->seek (ContentTime::from_frames (from, content->audio->resampled_frame_rate ()), true);
- ContentAudio ca = decoder->audio->get_audio (content->audio->stream(), from, length, true);
+ ContentAudio ca = decoder->audio->get (content->audio->stream(), from, length, true);
BOOST_CHECK_EQUAL (ca.frame, from);
return ca;
}
BOOST_CHECK_EQUAL (ctp.back().from, ContentTime::from_seconds (25 + 12 * 0.04));
BOOST_CHECK_EQUAL (ctp.back().to, ContentTime::from_seconds (26 + 4 * 0.04));
- list<ContentTextSubtitle> subs = decoder->subtitle->get_text_subtitles (
+ list<ContentTextSubtitle> subs = decoder->subtitle->get_text (
ContentTimePeriod (
ContentTime::from_seconds (25),
ContentTime::from_seconds (26)
check (shared_ptr<FFmpegDecoder> decoder, int frame)
{
list<ContentVideo> v;
- v = decoder->video->get_video (frame, true);
+ v = decoder->video->get (frame, true);
BOOST_CHECK (v.size() == 1);
BOOST_CHECK_EQUAL (v.front().frame, frame);
}
shared_ptr<Log> log (new NullLog);
shared_ptr<FFmpegDecoder> decoder (new FFmpegDecoder (content, log, false));
- BOOST_REQUIRE (decoder->video->_video_content->video_frame_rate());
- BOOST_CHECK_CLOSE (decoder->video->_video_content->video_frame_rate().get(), fps, 0.01);
+ BOOST_REQUIRE (decoder->video->_content->video_frame_rate());
+ BOOST_CHECK_CLOSE (decoder->video->_content->video_frame_rate().get(), fps, 0.01);
#ifdef DCPOMATIC_DEBUG
decoder->video->test_gaps = 0;
#endif
for (Frame i = 0; i < video_length; ++i) {
list<ContentVideo> v;
- v = decoder->video->get_video (i, true);
+ v = decoder->video->get (i, true);
BOOST_REQUIRE_EQUAL (v.size(), 1U);
BOOST_CHECK_EQUAL (v.front().frame, i);
}
Frame const first_frame = video_delay.round_up (content->active_video_frame_rate ()).frames_round (content->active_video_frame_rate ());
FFmpegDecoder decoder (content, film->log(), false);
- list<ContentVideo> a = decoder.video->get_video (first_frame, true);
+ list<ContentVideo> a = decoder.video->get (first_frame, true);
BOOST_CHECK (a.size() == 1);
BOOST_CHECK_EQUAL (a.front().frame, first_frame);
}
ImageDecoder decoder (c, film->log());
decoder.video->fill_one_eye (0, 4, EYES_BOTH);
- BOOST_CHECK_EQUAL (decoder.video->_decoded_video.size(), 4U);
- list<ContentVideo>::iterator i = decoder.video->_decoded_video.begin();
+ BOOST_CHECK_EQUAL (decoder.video->_decoded.size(), 4U);
+ list<ContentVideo>::iterator i = decoder.video->_decoded.begin();
for (int j = 0; j < 4; ++j) {
BOOST_CHECK_EQUAL (i->frame, j);
++i;
}
- decoder.video->_decoded_video.clear ();
+ decoder.video->_decoded.clear ();
decoder.video->fill_one_eye (0, 7, EYES_BOTH);
- BOOST_CHECK_EQUAL (decoder.video->_decoded_video.size(), 7);
- i = decoder.video->_decoded_video.begin();
+ BOOST_CHECK_EQUAL (decoder.video->_decoded.size(), 7);
+ i = decoder.video->_decoded.begin();
for (int j = 0; j < 7; ++j) {
BOOST_CHECK_EQUAL (i->frame, j);
++i;
ImageDecoder decoder (c, film->log());
decoder.video->fill_both_eyes (0, 4, EYES_LEFT);
- BOOST_CHECK_EQUAL (decoder.video->_decoded_video.size(), 8);
- list<ContentVideo>::iterator i = decoder.video->_decoded_video.begin();
+ BOOST_CHECK_EQUAL (decoder.video->_decoded.size(), 8);
+ list<ContentVideo>::iterator i = decoder.video->_decoded.begin();
for (int j = 0; j < 8; ++j) {
BOOST_CHECK_EQUAL (i->frame, j / 2);
BOOST_CHECK_EQUAL (i->eyes, (j % 2) == 0 ? EYES_LEFT : EYES_RIGHT);
}
decoder.video->fill_both_eyes (0, 7, EYES_RIGHT);
- BOOST_CHECK_EQUAL (decoder.video->_decoded_video.size(), 15);
- i = decoder.video->_decoded_video.begin();
+ BOOST_CHECK_EQUAL (decoder.video->_decoded.size(), 15);
+ i = decoder.video->_decoded.begin();
for (int j = 0; j < 15; ++j) {
BOOST_CHECK_EQUAL (i->frame, j / 2);
BOOST_CHECK_EQUAL (i->eyes, (j % 2) == 0 ? EYES_LEFT : EYES_RIGHT);