#include "ffmpeg_decoder.h"
#include "filter_graph.h"
#include "audio_buffers.h"
+#include "ffmpeg_content.h"
#include "i18n.h"
using std::stringstream;
using std::list;
using std::min;
+using std::pair;
using boost::shared_ptr;
using boost::optional;
using boost::dynamic_pointer_cast;
FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> f, shared_ptr<const FFmpegContent> c, bool video, bool audio)
: Decoder (f)
- , VideoDecoder (f)
+ , VideoDecoder (f, c)
, AudioDecoder (f, c)
, SubtitleDecoder (f)
, FFmpeg (c)
, _subtitle_codec (0)
, _decode_video (video)
, _decode_audio (audio)
- , _pts_offset (0)
+ , _video_pts_offset (0)
+ , _audio_pts_offset (0)
, _just_sought (false)
{
setup_subtitle ();
- if (video && audio && c->audio_stream() && c->first_video() && c->audio_stream()->first_audio) {
- _pts_offset = compute_pts_offset (c->first_video().get(), c->audio_stream()->first_audio.get(), c->video_frame_rate());
- }
-}
+ /* Audio and video frame PTS values may not start at 0.  We want
+ to fiddle them so that:
-double
-FFmpegDecoder::compute_pts_offset (double first_video, double first_audio, float video_frame_rate)
-{
- double const old_first_video = first_video;
-
- /* Round the first video to a frame boundary */
- if (fabs (rint (first_video * video_frame_rate) - first_video * video_frame_rate) > 1e-6) {
- first_video = ceil (first_video * video_frame_rate) / video_frame_rate;
+ 1. One of them starts at time 0.
+ 2. The first video PTS value ends up on a frame boundary.
+
+ This removes big initial gaps in PTS and allows our
+ insertion of black frames to work.
+
+ We will do:
+ audio_pts_to_use = audio_pts_from_ffmpeg + audio_pts_offset;
+ video_pts_to_use = video_pts_from_ffmpeg + video_pts_offset;
+ */
+
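+ /* A hypothetical worked example: say the first video PTS is 0.04s,
+    the first audio PTS is 0.02s and the video frame rate is 25fps.
+    Step 1 below sets both offsets to -0.02, so the audio starts at 0
+    and the video at 0.02s.  0.02s is not on a frame boundary at 25fps,
+    so step 2 rounds it up to 0.04s (frame 1), adding 0.02 to both
+    offsets and keeping audio and video in sync.
+ */
+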
+ bool const have_video = video && c->first_video();
+ bool const have_audio = audio && c->audio_stream() && c->audio_stream()->first_audio;
+
+ /* First, make one of them start at 0 */
+
+ if (have_audio && have_video) {
+ _video_pts_offset = _audio_pts_offset = - min (c->first_video().get(), c->audio_stream()->first_audio.get());
+ } else if (have_video) {
+ _video_pts_offset = - c->first_video().get();
+ } else if (have_audio) {
+ _audio_pts_offset = - c->audio_stream()->first_audio.get();
}
- /* Compute the required offset (also removing any common start delay) */
- return first_video - old_first_video - min (first_video, first_audio);
+ /* Now adjust both so that the video pts starts on a frame */
+ if (have_video && have_audio) {
+ double first_video = c->first_video().get() + _video_pts_offset;
+ double const old_first_video = first_video;
+
+ /* Round the first video up to a frame boundary */
+ if (fabs (rint (first_video * c->video_frame_rate()) - first_video * c->video_frame_rate()) > 1e-6) {
+ first_video = ceil (first_video * c->video_frame_rate()) / c->video_frame_rate ();
+ }
+
+ _video_pts_offset += first_video - old_first_video;
+ _audio_pts_offset += first_video - old_first_video;
+ }
}
FFmpegDecoder::~FFmpegDecoder ()
if (_subtitle_codec_context) {
avcodec_close (_subtitle_codec_context);
}
-}
+}
+
+void
+FFmpegDecoder::flush ()
+{
+ /* Get any remaining frames */
+
+ _packet.data = 0;
+ _packet.size = 0;
+
+ /* XXX: should we reset _packet.data and size after each *_decode_* call? */
+
+ if (_decode_video) {
+ while (decode_video_packet ()) {}
+ }
+
+ if (_ffmpeg_content->audio_stream() && _decode_audio) {
+ decode_audio_packet ();
+ }
+
+ /* Stop us being asked for any more data */
+ _video_position = _ffmpeg_content->video_length ();
+ _audio_position = _ffmpeg_content->audio_length ();
+}
void
FFmpegDecoder::pass ()
film->log()->log (String::compose (N_("error on av_read_frame (%1) (%2)"), buf, r));
}
- /* Get any remaining frames */
-
- _packet.data = 0;
- _packet.size = 0;
-
- /* XXX: should we reset _packet.data and size after each *_decode_* call? */
-
- if (_decode_video) {
- while (decode_video_packet ()) {}
- }
-
- if (_ffmpeg_content->audio_stream() && _decode_audio) {
- decode_audio_packet ();
- }
-
- /* Stop us being asked for any more data */
- _video_position = _ffmpeg_content->video_length ();
- _audio_position = _ffmpeg_content->audio_length ();
+ flush ();
return;
}
avcodec_get_frame_defaults (_frame);
- if (_packet.stream_index == _video_stream && _decode_video) {
+ shared_ptr<const Film> film = _film.lock ();
+ assert (film);
+
+ int const si = _packet.stream_index;
+
+ if (si == _video_stream && _decode_video) {
decode_video_packet ();
- } else if (_ffmpeg_content->audio_stream() && _packet.stream_index == _ffmpeg_content->audio_stream()->id && _decode_audio) {
+ } else if (_ffmpeg_content->audio_stream() && si == _ffmpeg_content->audio_stream()->index (_format_context) && _decode_audio) {
decode_audio_packet ();
- } else if (_ffmpeg_content->subtitle_stream() && _packet.stream_index == _ffmpeg_content->subtitle_stream()->id) {
+ } else if (_ffmpeg_content->subtitle_stream() && si == _ffmpeg_content->subtitle_stream()->index (_format_context) && film->with_subtitles ()) {
decode_subtitle_packet ();
}
FFmpegDecoder::seek (VideoContent::Frame frame, bool accurate)
{
double const time_base = av_q2d (_format_context->streams[_video_stream]->time_base);
- int64_t const vt = frame / (_ffmpeg_content->video_frame_rate() * time_base);
- av_seek_frame (_format_context, _video_stream, vt, AVSEEK_FLAG_BACKWARD);
+
+ /* If we are doing an accurate seek, our initial shot will be 5 frames (5 being
+ a number plucked from the air) earlier than we want to end up. The loop below
+ will hopefully then step through to where we want to be.
+ */
+ int initial = frame;
+
+ if (accurate) {
+ initial -= 5;
+ }
+
+ if (initial < 0) {
+ initial = 0;
+ }
+
+ /* Initial seek time in the stream's timebase */
+ int64_t const initial_vt = ((initial / _ffmpeg_content->video_frame_rate()) - _video_pts_offset) / time_base;
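+ /* For example (hypothetical numbers): seeking to frame 100 at 25fps
+    gives initial = 95, i.e. 3.8s; with a zero _video_pts_offset and a
+    1/90000 time base, initial_vt = 342000.
+ */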
+
+ av_seek_frame (_format_context, _video_stream, initial_vt, AVSEEK_FLAG_BACKWARD);
avcodec_flush_buffers (video_codec_context());
if (_subtitle_codec_context) {
}
_just_sought = true;
-
- if (frame == 0) {
- /* We're already there; from here on we can only seek non-zero amounts */
+ _video_position = frame;
+
+ if (frame == 0 || !accurate) {
+ /* We're already there, or we're as close as we need to be */
return;
}
- if (accurate) {
- while (1) {
- int r = av_read_frame (_format_context, &_packet);
- if (r < 0) {
- return;
- }
-
- avcodec_get_frame_defaults (_frame);
-
- if (_packet.stream_index == _video_stream) {
- int finished = 0;
- int const r = avcodec_decode_video2 (video_codec_context(), _frame, &finished, &_packet);
- if (r >= 0 && finished) {
- int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
- if (bet >= vt) {
- _video_position = rint (
- (bet * time_base + _pts_offset) * _ffmpeg_content->video_frame_rate()
- );
- av_free_packet (&_packet);
- break;
- }
- }
- }
-
+ while (1) {
+ int r = av_read_frame (_format_context, &_packet);
+ if (r < 0) {
+ return;
+ }
+
+ if (_packet.stream_index != _video_stream) {
av_free_packet (&_packet);
+ continue;
+ }
+
+ avcodec_get_frame_defaults (_frame);
+
+ int finished = 0;
+ r = avcodec_decode_video2 (video_codec_context(), _frame, &finished, &_packet);
+ if (r >= 0 && finished) {
+ _video_position = rint (
+ (av_frame_get_best_effort_timestamp (_frame) * time_base + _video_pts_offset) * _ffmpeg_content->video_frame_rate()
+ );
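+ /* e.g. (hypothetical numbers) a best-effort timestamp of 342000 in a
+    1/90000 time base with a zero offset is 3.8s, i.e. frame 95 at
+    25fps video.
+ */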
+
+ if (_video_position >= (frame - 1)) {
+ av_free_packet (&_packet);
+ break;
+ }
}
+
+ av_free_packet (&_packet);
}
}
*/
AVPacket copy_packet = _packet;
-
+
while (copy_packet.size > 0) {
int frame_finished;
int const decode_result = avcodec_decode_audio4 (audio_codec_context(), _frame, &frame_finished, &copy_packet);
- if (decode_result >= 0) {
- if (frame_finished) {
-
- if (_audio_position == 0) {
- /* Where we are in the source, in seconds */
- double const pts = av_q2d (_format_context->streams[copy_packet.stream_index]->time_base)
- * av_frame_get_best_effort_timestamp(_frame) - _pts_offset;
-
- if (pts > 0) {
- /* Emit some silence */
- shared_ptr<AudioBuffers> silence (
- new AudioBuffers (
- _ffmpeg_content->audio_channels(),
- pts * _ffmpeg_content->content_audio_frame_rate()
- )
- );
-
- silence->make_silent ();
- audio (silence, _audio_position);
- }
- }
+ if (decode_result < 0) {
+ shared_ptr<const Film> film = _film.lock ();
+ assert (film);
+ film->log()->log (String::compose ("avcodec_decode_audio4 failed (%1)", decode_result));
+ return;
+ }
+
+ if (frame_finished) {
+
+ if (_audio_position == 0) {
+ /* Where we are in the source, in seconds */
+ double const pts = av_q2d (_format_context->streams[copy_packet.stream_index]->time_base)
+ * av_frame_get_best_effort_timestamp(_frame) + _audio_pts_offset;
+
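+ /* For instance, if pts works out at 0.04s and the content audio
+    rate is 48kHz, this emits 1920 samples of silence before the
+    first real audio (hypothetical numbers).
+ */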
+ if (pts > 0) {
+ /* Emit some silence */
+ shared_ptr<AudioBuffers> silence (
+ new AudioBuffers (
+ _ffmpeg_content->audio_channels(),
+ pts * _ffmpeg_content->content_audio_frame_rate()
+ )
+ );
- copy_packet.data += decode_result;
- copy_packet.size -= decode_result;
+ silence->make_silent ();
+ audio (silence, _audio_position);
+ }
}
+
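+ /* Size in bytes of the decoded data: nb_samples across all the
+    channels, tightly packed (align = 1).
+ */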
+ int const data_size = av_samples_get_buffer_size (
+ 0, audio_codec_context()->channels, _frame->nb_samples, audio_sample_format (), 1
+ );
+
+ audio (deinterleave_audio (_frame->data, data_size), _audio_position);
}
+
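+ /* A packet may contain more than one frame; step over the bytes
+    that avcodec_decode_audio4 consumed and loop for the rest.
+ */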
+ copy_packet.data += decode_result;
+ copy_packet.size -= decode_result;
}
}
graph = *i;
}
- list<shared_ptr<Image> > images = graph->process (_frame);
+ list<pair<shared_ptr<Image>, int64_t> > images = graph->process (_frame);
string post_process = Filter::ffmpeg_strings (_ffmpeg_content->filters()).second;
- for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {
+ for (list<pair<shared_ptr<Image>, int64_t> >::iterator i = images.begin(); i != images.end(); ++i) {
- shared_ptr<Image> image = *i;
+ shared_ptr<Image> image = i->first;
if (!post_process.empty ()) {
image = image->post_process (post_process, true);
}
- int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
- if (bet != AV_NOPTS_VALUE) {
+ if (i->second != AV_NOPTS_VALUE) {
- double const pts = bet * av_q2d (_format_context->streams[_video_stream]->time_base) - _pts_offset;
+ double const pts = i->second * av_q2d (_format_context->streams[_video_stream]->time_base) + _video_pts_offset;
if (_just_sought) {
/* We just did a seek, so disable any attempts to correct for where we
/* This PTS is more than one frame forward in time of where we think we should be; emit
a black frame.
*/
+
+ /* XXX: I think this should be a copy of the last frame... */
boost::shared_ptr<Image> black (
- new SimpleImage (
+ new Image (
static_cast<AVPixelFormat> (_frame->format),
libdcp::Size (video_codec_context()->width, video_codec_context()->height),
true
{
boost::mutex::scoped_lock lm (_mutex);
- if (!_ffmpeg_content->subtitle_stream() || _ffmpeg_content->subtitle_stream()->id >= int (_format_context->nb_streams)) {
+ if (!_ffmpeg_content->subtitle_stream() || _ffmpeg_content->subtitle_stream()->index (_format_context) >= int (_format_context->nb_streams)) {
return;
}
- _subtitle_codec_context = _format_context->streams[_ffmpeg_content->subtitle_stream()->id]->codec;
+ _subtitle_codec_context = _ffmpeg_content->subtitle_stream()->stream(_format_context)->codec;
_subtitle_codec = avcodec_find_decoder (_subtitle_codec_context->codec_id);
if (_subtitle_codec == 0) {
/* Subtitle PTS in seconds (within the source, not taking into account any of the
source that we may have chopped off for the DCP)
*/
- double const packet_time = static_cast<double> (sub.pts) / AV_TIME_BASE;
+ double const packet_time = (static_cast<double> (sub.pts) / AV_TIME_BASE) + _video_pts_offset;
/* hence start time for this sub */
Time const from = (packet_time + (double (sub.start_display_time) / 1e3)) * TIME_HZ;
if (rect->type != SUBTITLE_BITMAP) {
throw DecodeError (_("non-bitmap subtitles not yet supported"));
}
-
- shared_ptr<Image> image (new SimpleImage (PIX_FMT_RGBA, libdcp::Size (rect->w, rect->h), true));
+
+ /* Note RGBA is expressed little-endian, so the first byte in the word is R, second
+ G, third B, fourth A.
+ */
+ shared_ptr<Image> image (new Image (PIX_FMT_RGBA, libdcp::Size (rect->w, rect->h), true));
/* Start of the first line in the subtitle */
uint8_t* sub_p = rect->pict.data[0];
- /* sub_p looks up into a RGB palette which is here */
+ /* sub_p looks up into a BGRA palette which is here
+ (i.e. first byte B, second G, third R, fourth A)
+ */
uint32_t const * palette = (uint32_t *) rect->pict.data[1];
/* Start of the output data */
uint32_t* out_p = (uint32_t *) image->data()[0];
-
+
for (int y = 0; y < rect->h; ++y) {
uint8_t* sub_line_p = sub_p;
uint32_t* out_line_p = out_p;
for (int x = 0; x < rect->w; ++x) {
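+ /* e.g. a palette word of 0xff102030 (bytes B=0x30, G=0x20, R=0x10,
+    A=0xff on a little-endian machine) becomes 0xff302010, i.e. bytes
+    R, G, B, A (hypothetical value).
+ */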
- *out_line_p++ = palette[*sub_line_p++];
+ uint32_t const p = palette[*sub_line_p++];
+ *out_line_p++ = ((p & 0xff) << 16) | (p & 0xff00) | ((p & 0xff0000) >> 16) | (p & 0xff000000);
}
sub_p += rect->pict.linesize[0];
out_p += image->stride()[0] / sizeof (uint32_t);