FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
: FFmpeg (c)
, Decoder (film)
+ , _filter_graphs(c->filters(), dcp::Fraction(lrint(_ffmpeg_content->video_frame_rate().get_value_or(24) * 1000), 1000))
{
if (c->video && c->video->use()) {
video = make_shared<VideoDecoder>(this, c);
}
if (c->only_text()) {
- /* XXX: this time here should be the time of the first subtitle, not 0 */
- text.push_back (make_shared<TextDecoder>(this, c->only_text(), ContentTime()));
+ text.push_back (make_shared<TextDecoder>(this, c->only_text()));
+ /* XXX: we should be calling maybe_set_position() on this TextDecoder, but we can't easily find
+ * the time of the first subtitle at this point.
+ */
}
for (auto i: c->ffmpeg_audio_streams()) {
}
-bool
+FFmpegDecoder::FlushResult
FFmpegDecoder::flush ()
{
- /* Flush video and audio once */
+ LOG_DEBUG_PLAYER("Flush FFmpeg decoder: current state %1", static_cast<int>(_flush_state));
+
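+	/* Each call advances the flush by at most one stage; the caller keeps
+	   calling us for as long as we return FlushResult::AGAIN.
+	*/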
+ switch (_flush_state) {
+ case FlushState::CODECS:
+ if (flush_codecs() == FlushResult::DONE) {
+ LOG_DEBUG_PLAYER_NC("Finished flushing codecs");
+ _flush_state = FlushState::AUDIO_DECODER;
+ }
+ break;
+ case FlushState::AUDIO_DECODER:
+ if (audio) {
+ audio->flush();
+ }
+ LOG_DEBUG_PLAYER_NC("Finished flushing audio decoder");
+ _flush_state = FlushState::FILL;
+ break;
+ case FlushState::FILL:
+ if (flush_fill() == FlushResult::DONE) {
+ LOG_DEBUG_PLAYER_NC("Finished flushing fills");
+ return FlushResult::DONE;
+ }
+ break;
+ }
+ return FlushResult::AGAIN;
+}
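+
+/* For reference: the state machine above presumably relies on declarations
+   along these lines in the class (a hypothetical sketch, not part of this
+   change):
+
+     enum class FlushResult { DONE, AGAIN };
+     enum class FlushState { CODECS, AUDIO_DECODER, FILL };
+
+     FlushState _flush_state = FlushState::CODECS;
+*/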
+
+
+/** @return FlushResult::DONE if we have finished flushing the codecs, otherwise FlushResult::AGAIN */
+FFmpegDecoder::FlushResult
+FFmpegDecoder::flush_codecs()
+{
bool did_something = false;
if (video) {
if (decode_and_process_video_packet(nullptr)) {
}
}
- if (did_something) {
- /* We want to be called again */
- return false;
- }
+ return did_something ? FlushResult::AGAIN : FlushResult::DONE;
+}
+
+FFmpegDecoder::FlushResult
+FFmpegDecoder::flush_fill()
+{
/* Make sure all streams are the same length and round up to the next video frame */
+ bool did_something = false;
+
auto const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
ContentTime full_length (_ffmpeg_content->full_length(film()), frc);
full_length = full_length.ceil (frc.source);
- if (video) {
+ if (video && !video->ignore()) {
double const vfr = _ffmpeg_content->video_frame_rate().get();
auto const f = full_length.frames_round (vfr);
- auto v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
- while (v < f) {
- video->emit (film(), make_shared<const RawImageProxy>(_black_image), v);
- ++v;
+ auto const v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
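+ /* Emit at most one black frame per call; flush() returns AGAIN, so we
+ will be called again if more padding is needed.
+ */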
+ if (v < f) {
+ video->emit(film(), make_shared<const RawImageProxy>(_black_image), v);
+ did_something = true;
}
}
- for (auto i: _ffmpeg_content->ffmpeg_audio_streams ()) {
- auto a = audio->stream_position(film(), i);
- /* Unfortunately if a is 0 that really means that we don't know the stream position since
- there has been no data on it since the last seek. In this case we'll just do nothing
- here. I'm not sure if that's the right idea.
- */
- if (a > ContentTime()) {
- while (a < full_length) {
+ if (audio && !audio->ignore()) {
+ for (auto i: _ffmpeg_content->ffmpeg_audio_streams ()) {
+ auto const a = audio->stream_position(film(), i);
+ /* Unfortunately if a is 0 that really means that we don't know the stream position since
+ there has been no data on it since the last seek. In this case we'll just do nothing
+ here. I'm not sure if that's the right idea.
+ */
+ if (a > ContentTime() && a < full_length) {
+ LOG_DEBUG_PLAYER("Flush inserts silence at %1", to_string(a));
auto to_do = min (full_length - a, ContentTime::from_seconds (0.1));
auto silence = make_shared<AudioBuffers>(i->channels(), to_do.frames_ceil (i->frame_rate()));
silence->make_silent ();
audio->emit (film(), i, silence, a, true);
- a += to_do;
+ did_something = true;
}
}
}
- if (audio) {
- audio->flush ();
- }
-
- return true;
+ return did_something ? FlushResult::AGAIN : FlushResult::DONE;
}
Hence it makes sense to continue here in that case.
*/
if (r < 0 && r != AVERROR_INVALIDDATA) {
+ LOG_DEBUG_PLAYER("FFpmegDecoder::pass flushes because av_read_frame returned %1", r);
if (r != AVERROR_EOF) {
/* Maybe we should fail here, but for now we'll just finish off instead */
char buf[256];
}
av_packet_free (&packet);
- return flush ();
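+ /* pass() reports that it is finished only once the multi-stage flush is done */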
+ return flush() == FlushResult::DONE;
}
int const si = packet->stream_index;
/** @param frame Frame whose data members point to the sample buffers.
 * Only the first buffer will be used for non-planar data, otherwise there will be one per channel.
 */
+static
shared_ptr<AudioBuffers>
-FFmpegDecoder::deinterleave_audio (AVFrame* frame)
+deinterleave_audio(AVFrame* frame)
{
auto format = static_cast<AVSampleFormat>(frame->format);
auto audio = make_shared<AudioBuffers>(channels, frames);
auto data = audio->data();
+ if (frames == 0) {
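+ /* No samples in this frame, so there is nothing to deinterleave */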
+ return audio;
+ }
+
switch (format) {
case AV_SAMPLE_FMT_U8:
{
case AV_SAMPLE_FMT_FLTP:
{
auto p = reinterpret_cast<float**> (frame->data);
- DCPOMATIC_ASSERT (frame->channels <= channels);
- /* Sometimes there aren't as many channels in the frame as in the stream */
- for (int i = 0; i < frame->channels; ++i) {
+ for (int i = 0; i < channels; ++i) {
memcpy (data[i], p[i], frames * sizeof(float));
}
- for (int i = frame->channels; i < channels; ++i) {
- audio->make_silent (i);
- }
}
break;
AVSEEK_FLAG_BACKWARD
);
- {
- /* Force re-creation of filter graphs to reset them and hence to make sure
- they don't have any pre-seek frames knocking about.
- */
- boost::mutex::scoped_lock lm (_filter_graphs_mutex);
- _filter_graphs.clear ();
- }
+ /* Force re-creation of filter graphs to reset them and hence to make sure
+ they don't have any pre-seek frames knocking about.
+ */
+ _filter_graphs.clear();
if (video_codec_context ()) {
avcodec_flush_buffers (video_codec_context());
FFmpegDecoder::process_audio_frame (shared_ptr<FFmpegAudioStream> stream)
{
auto frame = audio_frame (stream);
- auto data = deinterleave_audio (frame);
+ auto data = deinterleave_audio(frame);
+
+ auto const time_base = stream->stream(_format_context)->time_base;
ContentTime ct;
if (frame->pts == AV_NOPTS_VALUE) {
} else {
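+ /* best_effort_timestamp is in units of the stream's time base; convert
+ it to seconds and apply the global PTS offset.
+ */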
ct = ContentTime::from_seconds (
frame->best_effort_timestamp *
- av_q2d (stream->stream(_format_context)->time_base))
+ av_q2d(time_base))
+ _pts_offset;
+ LOG_DEBUG_PLAYER(
+ "Process audio with timestamp %1 (BET %2, timebase %3/%4, (PTS offset %5)",
+ to_string(ct),
+ frame->best_effort_timestamp,
+ time_base.num,
+ time_base.den,
+ to_string(_pts_offset)
+ );
}
_next_time[stream] = ct + ContentTime::from_frames(data->frames(), stream->frame_rate());
data->frames(),
stream->id(),
frame->best_effort_timestamp,
- av_q2d(stream->stream(_format_context)->time_base),
+ av_q2d(time_base),
to_string(_pts_offset)
);
}
auto context = _codec_context[stream->index(_format_context)];
auto frame = audio_frame (stream);
+ LOG_DEBUG_PLAYER("Send audio packet on stream %1", stream->index(_format_context));
int r = avcodec_send_packet (context, packet);
if (r < 0) {
LOG_WARNING("avcodec_send_packet returned %1 for an audio packet", r);
r = avcodec_receive_frame (context, frame);
if (r == AVERROR(EAGAIN)) {
/* More input is required */
+ LOG_DEBUG_PLAYER_NC("EAGAIN after trying to receive audio frame");
return;
}
void
FFmpegDecoder::process_video_frame ()
{
- boost::mutex::scoped_lock lm (_filter_graphs_mutex);
-
- shared_ptr<VideoFilterGraph> graph;
-
- auto i = _filter_graphs.begin();
- while (i != _filter_graphs.end() && !(*i)->can_process(dcp::Size(_video_frame->width, _video_frame->height), (AVPixelFormat) _video_frame->format)) {
- ++i;
- }
-
- if (i == _filter_graphs.end ()) {
- dcp::Fraction vfr (lrint(_ffmpeg_content->video_frame_rate().get() * 1000), 1000);
- graph = make_shared<VideoFilterGraph>(dcp::Size(_video_frame->width, _video_frame->height), (AVPixelFormat) _video_frame->format, vfr);
- graph->setup (_ffmpeg_content->filters ());
- _filter_graphs.push_back (graph);
- LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _video_frame->width, _video_frame->height, _video_frame->format);
- } else {
- graph = *i;
- }
-
+ auto graph = _filter_graphs.get(dcp::Size(_video_frame->width, _video_frame->height), static_cast<AVPixelFormat>(_video_frame->format));
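+ /* get() encapsulates the find-or-create (and any locking) that used to be done by hand here */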
auto images = graph->process (_video_frame);
for (auto const& i: images) {
void
FFmpegDecoder::decode_and_process_subtitle_packet (AVPacket* packet)
{
+ auto context = subtitle_codec_context();
+ if (!context) {
+ return;
+ }
+
int got_subtitle;
AVSubtitle sub;
- if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, packet) < 0 || !got_subtitle) {
+ if (avcodec_decode_subtitle2(context, &sub, &got_subtitle, packet) < 0 || !got_subtitle) {
return;
}
*/
ContentTime from;
from = sub_period.from + _pts_offset;
+ _have_current_subtitle = true;
if (sub_period.to) {
_current_subtitle_to = *sub_period.to + _pts_offset;
} else {
_current_subtitle_to = optional<ContentTime>();
- _have_current_subtitle = true;
}
ContentBitmapText bitmap_text(from);
/* sub_p looks up into a BGRA palette which is at rect->pict.data[1];
(i.e. first byte B, second G, third R, fourth A)
*/
- auto const palette = rect->pict.data[1];
+ auto const* palette = rect->pict.data[1];
#else
/* Start of the first line in the subtitle */
auto sub_p = rect->data[0];
if (target_height == 0 && video_codec_context()) {
target_height = video_codec_context()->height;
}
- DCPOMATIC_ASSERT (target_width);
- DCPOMATIC_ASSERT (target_height);
+
+ int x_offset = 0;
+ int y_offset = 0;
+ if (_ffmpeg_content->video && _ffmpeg_content->video->use()) {
+ auto const crop = _ffmpeg_content->video->actual_crop();
+ target_width -= crop.left + crop.right;
+ target_height -= crop.top + crop.bottom;
+ x_offset = -crop.left;
+ y_offset = -crop.top;
+ }
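+ /* For example: a rect at x = 100 on a 1920-wide frame with 10 pixels
+ cropped from each side maps to (100 - 10) / 1900 of the cropped width.
+ */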
+
+ DCPOMATIC_ASSERT(target_width > 0);
+ DCPOMATIC_ASSERT(target_height > 0);
+
dcpomatic::Rect<double> const scaled_rect (
- static_cast<double>(rect->x) / target_width,
- static_cast<double>(rect->y) / target_height,
+ static_cast<double>(rect->x + x_offset) / target_width,
+ static_cast<double>(rect->y + y_offset) / target_height,
static_cast<double>(rect->w) / target_width,
static_cast<double>(rect->h) / target_height
);
}
sub::RawSubtitle base;
+ auto video_size = _ffmpeg_content->video->size();
+ DCPOMATIC_ASSERT(video_size);
+
auto raw = sub::SSAReader::parse_line (
base,
text,
- _ffmpeg_content->video->size().width,
- _ffmpeg_content->video->size().height
+ video_size->width,
+ video_size->height,
+ sub::Colour(1, 1, 1)
);
for (auto const& i: sub::collect<vector<sub::Subtitle>>(raw)) {