return av_get_bytes_per_sample (audio_sample_format ());
}
-int
-FFmpegDecoder::minimal_run (boost::function<bool (optional<ContentTime>, optional<ContentTime>, int)> finished)
-{
- int frames_read = 0;
- optional<ContentTime> last_video;
- optional<ContentTime> last_audio;
-
- while (!finished (last_video, last_audio, frames_read)) {
- int r = av_read_frame (_format_context, &_packet);
- if (r < 0) {
- /* We should flush our decoders here, possibly yielding a few more frames,
- but the consequence of having to do that is too hideous to contemplate.
- Instead we give up and say that you can't seek too close to the end
- of a file.
- */
- return frames_read;
- }
-
- ++frames_read;
-
- double const time_base = av_q2d (_format_context->streams[_packet.stream_index]->time_base);
-
- if (_packet.stream_index == _video_stream) {
-
- av_frame_unref (_frame);
-
- int got_picture = 0;
- r = avcodec_decode_video2 (video_codec_context(), _frame, &got_picture, &_packet);
- if (r >= 0 && got_picture) {
- last_video = ContentTime::from_seconds (av_frame_get_best_effort_timestamp (_frame) * time_base) + _pts_offset;
- }
-
- } else if (_ffmpeg_content->audio_stream() && _ffmpeg_content->audio_stream()->uses_index (_format_context, _packet.stream_index)) {
- AVPacket copy_packet = _packet;
- while (copy_packet.size > 0) {
-
- int got_frame;
- r = avcodec_decode_audio4 (audio_codec_context(), _frame, &got_frame, &_packet);
- if (r >= 0 && got_frame) {
- last_audio = ContentTime::from_seconds (av_frame_get_best_effort_timestamp (_frame) * time_base) + _pts_offset;
- }
-
- copy_packet.data += r;
- copy_packet.size -= r;
- }
- }
-
- av_free_packet (&_packet);
- }
-
- return frames_read;
-}
-
-bool
-FFmpegDecoder::seek_overrun_finished (ContentTime seek, optional<ContentTime> last_video, optional<ContentTime> last_audio) const
-{
- return (last_video && last_video.get() >= seek) || (last_audio && last_audio.get() >= seek);
-}
-
-bool
-FFmpegDecoder::seek_final_finished (int n, int done) const
-{
- return n == done;
-}
-
void
FFmpegDecoder::seek_and_flush (ContentTime t)
{
initial_seek = ContentTime (0);
}
- /* Initial seek time in the video stream's timebase */
-
- seek_and_flush (initial_seek);
-
- if (!accurate) {
- /* That'll do */
- return;
- }
-
- int const N = minimal_run (boost::bind (&FFmpegDecoder::seek_overrun_finished, this, time, _1, _2));
-
seek_and_flush (initial_seek);
- if (N > 0) {
- minimal_run (boost::bind (&FFmpegDecoder::seek_final_finished, this, N - 1, _3));
- }
}
void
void maybe_add_subtitle ();
boost::shared_ptr<AudioBuffers> deinterleave_audio (uint8_t** data, int size);
- bool seek_overrun_finished (ContentTime, boost::optional<ContentTime>, boost::optional<ContentTime>) const;
- bool seek_final_finished (int, int) const;
- int minimal_run (boost::function<bool (boost::optional<ContentTime>, boost::optional<ContentTime>, int)>);
void seek_and_flush (ContentTime);
bool has_subtitle_during (ContentTimePeriod) const;
}
shared_ptr<PlayerVideo>
-Player::black_player_video_frame () const
+Player::black_player_video_frame (DCPTime time) const
{
return shared_ptr<PlayerVideo> (
new PlayerVideo (
shared_ptr<const ImageProxy> (new RawImageProxy (_black_image, _film->log ())),
+ time,
Crop (),
_video_container_size,
_video_container_size,
if (ov.empty ()) {
/* No video content at this time */
- pvf.push_back (black_player_video_frame ());
+ pvf.push_back (black_player_video_frame (time));
} else {
/* Create a PlayerVideo from the content's video at this time */
list<ContentVideo> content_video = decoder->get_video (dcp_to_content_video (piece, time), accurate);
if (content_video.empty ()) {
- pvf.push_back (black_player_video_frame ());
+ pvf.push_back (black_player_video_frame (time));
return pvf;
}
shared_ptr<PlayerVideo> (
new PlayerVideo (
i->image,
+ content_video_to_dcp (piece, i->frame),
content->crop (),
image_size,
_video_container_size,
return DCPTime (s + piece->content->trim_start()).frames (_film->video_frame_rate()) * piece->frc.factor ();
}
+DCPTime
+Player::content_video_to_dcp (shared_ptr<const Piece> piece, VideoFrame f) const
+{
+ DCPTime t = DCPTime::from_frames (f / piece->frc.factor (), _film->video_frame_rate()) - piece->content->trim_start () + piece->content->position ();
+ if (t < DCPTime ()) {
+ t = DCPTime ();
+ }
+
+ return t;
+}
+
AudioFrame
Player::dcp_to_content_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
std::list<PositionImage> process_content_text_subtitles (std::list<boost::shared_ptr<ContentTextSubtitle> >) const;
void update_subtitle_from_text ();
VideoFrame dcp_to_content_video (boost::shared_ptr<const Piece> piece, DCPTime t) const;
+ DCPTime content_video_to_dcp (boost::shared_ptr<const Piece> piece, VideoFrame f) const;
AudioFrame dcp_to_content_audio (boost::shared_ptr<const Piece> piece, DCPTime t) const;
ContentTime dcp_to_content_subtitle (boost::shared_ptr<const Piece> piece, DCPTime t) const;
- boost::shared_ptr<PlayerVideo> black_player_video_frame () const;
+ boost::shared_ptr<PlayerVideo> black_player_video_frame (DCPTime) const;
/** @return Pieces of content type C that overlap a specified time range in the DCP */
template<class C>
PlayerVideo::PlayerVideo (
shared_ptr<const ImageProxy> in,
+ DCPTime time,
Crop crop,
dcp::Size inter_size,
dcp::Size out_size,
ColourConversion colour_conversion
)
: _in (in)
+ , _time (time)
, _crop (crop)
, _inter_size (inter_size)
, _out_size (out_size)
PlayerVideo::PlayerVideo (shared_ptr<cxml::Node> node, shared_ptr<Socket> socket, shared_ptr<Log> log)
{
+ _time = DCPTime (node->number_child<DCPTime::Type> ("Time"));
_crop = Crop (node);
_inter_size = dcp::Size (node->number_child<int> ("InterWidth"), node->number_child<int> ("InterHeight"));
void
PlayerVideo::add_metadata (xmlpp::Node* node) const
{
+ node->add_child("Time")->add_child_text (raw_convert<string> (_time.get ()));
_crop.as_xml (node);
_in->add_metadata (node->add_child ("In"));
node->add_child("InterWidth")->add_child_text (raw_convert<string> (_inter_size.width));
class PlayerVideo
{
public:
- PlayerVideo (boost::shared_ptr<const ImageProxy>, Crop, dcp::Size, dcp::Size, Scaler const *, Eyes, Part, ColourConversion);
+ PlayerVideo (boost::shared_ptr<const ImageProxy>, DCPTime, Crop, dcp::Size, dcp::Size, Scaler const *, Eyes, Part, ColourConversion);
PlayerVideo (boost::shared_ptr<cxml::Node>, boost::shared_ptr<Socket>, boost::shared_ptr<Log>);
void set_subtitle (PositionImage);
void add_metadata (xmlpp::Node* node) const;
void send_binary (boost::shared_ptr<Socket> socket) const;
+ DCPTime time () const {
+ return _time;
+ }
+
Eyes eyes () const {
return _eyes;
}
private:
boost::shared_ptr<const ImageProxy> _in;
+ DCPTime _time;
Crop _crop;
dcp::Size _inter_size;
dcp::Size _out_size;
list<ContentVideo>
VideoDecoder::get_video (VideoFrame frame, bool accurate)
{
- if (_decoded_video.empty() || (frame < _decoded_video.front().frame || frame > (_decoded_video.back().frame + 1))) {
- /* Either we have no decoded data, or what we do have is a long way from what we want: seek */
+ /* At this stage, if get_video() has been called before, _decoded_video will contain the last frame that
+    this method returned (and possibly a few more). If the requested frame is neither in _decoded_video
+    nor the frame immediately following the end of _decoded_video, we need to seek.
+ */
+
+ if (_decoded_video.empty() || frame < _decoded_video.front().frame || frame > (_decoded_video.back().frame + 1)) {
seek (ContentTime::from_frames (frame, _video_content->video_frame_rate()), accurate);
}
/* Now enough pass() calls should either:
* (a) give us what we want, or
- * (b) hit the end of the decoder.
+ * (b) give us something after what we want, indicating that we will never get what we want, or
+ * (c) hit the end of the decoder.
*/
if (accurate) {
/* We are being accurate, so we want the right frame.
}
}
- /* Clean up decoded_video */
- while (!_decoded_video.empty() && _decoded_video.front().frame < (frame - 1)) {
+ /* Clean up _decoded_video; keep the frame we are returning, but nothing before that */
+ while (!_decoded_video.empty() && _decoded_video.front().frame < dec.front().frame) {
_decoded_video.pop_front ();
}
if (!pvf.empty ()) {
_frame = pvf.front()->image ();
_frame = _frame->scale (_frame->size(), Scaler::from_id ("fastbilinear"), PIX_FMT_RGB24, false);
+ _position = pvf.front()->time ();
} else {
_frame.reset ();
+ _position = p;
}
- _position = p;
-
set_position_text ();
_panel->Refresh ();
_panel->Update ();
shared_ptr<PlayerVideo> pvf (
new PlayerVideo (
shared_ptr<ImageProxy> (new RawImageProxy (image, log)),
+ DCPTime (),
Crop (),
dcp::Size (1998, 1080),
dcp::Size (1998, 1080),
shared_ptr<PlayerVideo> pvf (
new PlayerVideo (
shared_ptr<ImageProxy> (new RawImageProxy (image, log)),
+ DCPTime (),
Crop (),
dcp::Size (1998, 1080),
dcp::Size (1998, 1080),