using boost::dynamic_pointer_cast;
using dcp::Size;
-FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log)
+FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log, bool fast)
: FFmpeg (c)
, _log (log)
+ , _have_current_subtitle (false)
{
if (c->video) {
video.reset (new VideoDecoder (this, c, log));
_pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate());
+ /* It doesn't matter what size or pixel format this is; it just needs to be black */
+ _black_image.reset (new Image (AV_PIX_FMT_RGB24, dcp::Size (128, 128), true));
+ _black_image->make_black ();
} else {
_pts_offset = ContentTime ();
}
if (c->audio) {
- audio.reset (new AudioDecoder (this, c->audio, log));
+ audio.reset (new AudioDecoder (this, c->audio, log, fast));
}
if (c->subtitle) {
- subtitle.reset (new SubtitleDecoder (this, c->subtitle, log));
+ /* XXX: this time here should be the time of the first subtitle, not 0 */
+ subtitle.reset (new SubtitleDecoder (this, c->subtitle, log, ContentTime()));
}
+
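+ /* Next expected content time (if known) for each stream, used when a frame arrives without a timestamp */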
+ _next_time.resize (_format_context->nb_streams);
}
void
if (audio) {
decode_audio_packet ();
}
+
+ /* Make sure all streams are the same length and round up to the next video frame */
+
+ FrameRateChange const frc = _ffmpeg_content->film()->active_frame_rate_change(_ffmpeg_content->position());
+ ContentTime full_length (_ffmpeg_content->full_length(), frc);
+ full_length = full_length.ceil (frc.source);
+ if (video) {
+ double const vfr = _ffmpeg_content->video_frame_rate().get();
+ Frame const f = full_length.frames_round (vfr);
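+ /* Start padding from the frame after the last one we emitted */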
+ Frame v = video->position().frames_round (vfr) + 1;
+ while (v < f) {
+ video->emit (shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)), v);
+ ++v;
+ }
+ }
+
+ BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, _ffmpeg_content->ffmpeg_audio_streams ()) {
+ ContentTime a = audio->stream_position(i);
+ /* Unfortunately, if a is 0 that really means we don't know the stream position, since
+ there has been no data on the stream since the last seek. In that case we just do
+ nothing here; it's not clear whether that is the right approach.
+ */
+ if (a > ContentTime()) {
+ while (a < full_length) {
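+ /* Emit the silence in chunks of up to 0.1s */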
+ ContentTime to_do = min (full_length - a, ContentTime::from_seconds (0.1));
+ shared_ptr<AudioBuffers> silence (new AudioBuffers (i->channels(), to_do.frames_ceil (i->frame_rate())));
+ silence->make_silent ();
+ audio->emit (i, silence, a);
+ a += to_do;
+ }
+ }
+ }
+
+ if (audio) {
+ audio->flush ();
+ }
}
bool
if (_video_stream && si == _video_stream.get() && !video->ignore()) {
decode_video_packet ();
- } else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index (_format_context, si)) {
+ } else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !subtitle->ignore()) {
decode_subtitle_packet ();
} else {
decode_audio_packet ();
of the block that do not form a complete sample or frame they will be dropped.
*/
int const total_samples = size / bytes_per_audio_sample (stream);
- int const frames = total_samples / stream->channels();
- shared_ptr<AudioBuffers> audio (new AudioBuffers (stream->channels(), frames));
+ int const channels = stream->channels();
+ int const frames = total_samples / channels;
+ shared_ptr<AudioBuffers> audio (new AudioBuffers (channels, frames));
+ float** data = audio->data();
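+ /* Interleaved formats are laid out c0 c1 ... cN-1 c0 c1 ...; the loops below walk the
+ packed buffer, bumping channel and wrapping it to advance sample. Planar formats
+ (the *P cases) already have one buffer per channel in _frame->data.
+ */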
switch (audio_sample_format (stream)) {
case AV_SAMPLE_FMT_U8:
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
- audio->data(channel)[sample] = float(*p++) / (1 << 23);
+ /* U8 is unsigned with a bias of 128 */
+ data[channel][sample] = (float(*p++) - 128) / 128;
++channel;
- if (channel == stream->channels()) {
+ if (channel == channels) {
channel = 0;
++sample;
}
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
- audio->data(channel)[sample] = float(*p++) / (1 << 15);
+ data[channel][sample] = float(*p++) / (1 << 15);
++channel;
- if (channel == stream->channels()) {
+ if (channel == channels) {
channel = 0;
++sample;
}
case AV_SAMPLE_FMT_S16P:
{
int16_t** p = reinterpret_cast<int16_t **> (_frame->data);
- for (int i = 0; i < stream->channels(); ++i) {
+ for (int i = 0; i < channels; ++i) {
for (int j = 0; j < frames; ++j) {
- audio->data(i)[j] = static_cast<float>(p[i][j]) / (1 << 15);
+ data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);
}
}
}
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
- audio->data(channel)[sample] = static_cast<float>(*p++) / 2147483648;
+ data[channel][sample] = static_cast<float>(*p++) / 2147483648;
++channel;
- if (channel == stream->channels()) {
+ if (channel == channels) {
channel = 0;
++sample;
}
case AV_SAMPLE_FMT_S32P:
{
int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
- for (int i = 0; i < stream->channels(); ++i) {
+ for (int i = 0; i < channels; ++i) {
for (int j = 0; j < frames; ++j) {
- audio->data(i)[j] = static_cast<float>(p[i][j]) / 2147483648;
+ data[i][j] = static_cast<float>(p[i][j]) / 2147483648;
}
}
}
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
- audio->data(channel)[sample] = *p++;
+ data[channel][sample] = *p++;
++channel;
- if (channel == stream->channels()) {
+ if (channel == channels) {
channel = 0;
++sample;
}
float** p = reinterpret_cast<float**> (_frame->data);
/* Sometimes there aren't as many channels in the _frame as in the stream */
for (int i = 0; i < _frame->channels; ++i) {
- memcpy (audio->data(i), p[i], frames * sizeof(float));
+ memcpy (data[i], p[i], frames * sizeof(float));
}
- for (int i = _frame->channels; i < stream->channels(); ++i) {
+ for (int i = _frame->channels; i < channels; ++i) {
audio->make_silent (i);
}
}
if (subtitle_codec_context ()) {
avcodec_flush_buffers (subtitle_codec_context ());
}
+
+ _have_current_subtitle = false;
}
void
*/
AVPacket copy_packet = _packet;
+ int const stream_index = copy_packet.stream_index;
/* XXX: inefficient */
vector<shared_ptr<FFmpegAudioStream> > streams = ffmpeg_content()->ffmpeg_audio_streams ();
vector<shared_ptr<FFmpegAudioStream> >::const_iterator stream = streams.begin ();
- while (stream != streams.end () && !(*stream)->uses_index (_format_context, copy_packet.stream_index)) {
+ while (stream != streams.end () && !(*stream)->uses_index (_format_context, stream_index)) {
++stream;
}
}
if (frame_finished) {
- ContentTime ct = ContentTime::from_seconds (
- av_frame_get_best_effort_timestamp (_frame) *
- av_q2d ((*stream)->stream (_format_context)->time_base))
- + _pts_offset;
-
shared_ptr<AudioBuffers> data = deinterleave_audio (*stream);
+ ContentTime ct;
+ if (_frame->pts == AV_NOPTS_VALUE && _next_time[stream_index]) {
+ /* In some streams not every frame comes through with a timestamp; for those
+ that have AV_NOPTS_VALUE we need to work out the timestamp ourselves. This is
+ particularly noticeable with TrueHD streams (see #1111).
+ */
+ ct = *_next_time[stream_index];
+ } else {
+ ct = ContentTime::from_seconds (
+ av_frame_get_best_effort_timestamp (_frame) *
+ av_q2d ((*stream)->stream (_format_context)->time_base))
+ + _pts_offset;
+ }
+
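+ /* Remember the timestamp we expect for this stream's next frame, in case it arrives without a PTS */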
+ _next_time[stream_index] = ct + ContentTime::from_frames(data->frames(), (*stream)->frame_rate());
+
if (ct < ContentTime ()) {
/* Discard audio data that comes before time 0 */
Frame const remove = min (int64_t (data->frames()), (-ct).frames_ceil(double((*stream)->frame_rate ())));
}
if (ct < ContentTime()) {
- LOG_WARNING ("Crazy timestamp %1", to_string (ct));
+ LOG_WARNING (
+ "Crazy timestamp %1 for %2 samples in stream %3 packet pts %4 (ts=%5 tb=%6, off=%7)",
+ to_string(ct),
+ data->frames(),
stream_index,
+ copy_packet.pts,
+ av_frame_get_best_effort_timestamp(_frame),
+ av_q2d((*stream)->stream(_format_context)->time_base),
+ to_string(_pts_offset)
+ );
}
/* Give this data provided there is some, and its time is sane */
return;
}
+ /* Stop any current subtitle, either at the time it was supposed to stop or at the start of this new one, whichever is sooner */
+ if (_have_current_subtitle) {
+ if (_current_subtitle_to) {
+ subtitle->emit_stop (min(*_current_subtitle_to, subtitle_period(sub).from + _pts_offset));
+ } else {
+ subtitle->emit_stop (subtitle_period(sub).from + _pts_offset);
+ }
+ _have_current_subtitle = false;
+ }
+
if (sub.num_rects <= 0) {
- /* Sometimes we get an empty AVSubtitle, which is used by some codecs to
- indicate that the previous subtitle should stop. We can ignore it here.
- */
+ /* Nothing new in this subtitle */
return;
}
source that we may have chopped off for the DCP).
*/
FFmpegSubtitlePeriod sub_period = subtitle_period (sub);
- ContentTimePeriod period;
- period.from = sub_period.from + _pts_offset;
- /* We can't trust the `to' time from sub_period as there are some decoders which
- give a sub_period time for `to' which is subsequently overridden by a `stop' subtitle;
- see also FFmpegExaminer.
- */
- period.to = ffmpeg_content()->subtitle_stream()->find_subtitle_to (subtitle_id (sub));
+ ContentTime const from = sub_period.from + _pts_offset;
+ _have_current_subtitle = true;
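+ /* The to time may be unknown here; some decoders only deliver it with a later stop subtitle */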
+ if (sub_period.to) {
+ _current_subtitle_to = *sub_period.to + _pts_offset;
+ } else {
+ _current_subtitle_to = optional<ContentTime>();
+ }
for (unsigned int i = 0; i < sub.num_rects; ++i) {
AVSubtitleRect const * rect = sub.rects[i];
case SUBTITLE_NONE:
break;
case SUBTITLE_BITMAP:
- decode_bitmap_subtitle (rect, period);
+ decode_bitmap_subtitle (rect, from);
break;
case SUBTITLE_TEXT:
cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
break;
case SUBTITLE_ASS:
- decode_ass_subtitle (rect->ass, period);
+ decode_ass_subtitle (rect->ass, from);
break;
}
}
+ if (_current_subtitle_to) {
+ subtitle->emit_stop (*_current_subtitle_to);
+ }
+
avsubtitle_free (&sub);
}
void
-FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTimePeriod period)
+FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime from)
{
- /* Note RGBA is expressed little-endian, so the first byte in the word is R, second
- G, third B, fourth A.
+ /* Note BGRA is expressed little-endian, so the first byte in the word is B, second
+ G, third R, fourth A.
*/
- shared_ptr<Image> image (new Image (AV_PIX_FMT_RGBA, dcp::Size (rect->w, rect->h), true));
+ shared_ptr<Image> image (new Image (AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), true));
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
/* Start of the first line in the subtitle */
uint32_t* out_line_p = out_p;
for (int x = 0; x < rect->w; ++x) {
RGBA const p = mapped_palette[*sub_line_p++];
- /* XXX: this seems to be wrong to me (isn't the output image RGBA?) but it looks right on screen */
- *out_line_p++ = (p.a << 24) | (p.r << 16) | (p.g << 8) | p.b;
+ /* XXX: this seems to be wrong to me (isn't the output image BGRA?) but it looks right on screen */
+ *out_line_p++ = (p.a << 24) | (p.b << 16) | (p.g << 8) | p.r;
}
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
sub_p += rect->pict.linesize[0];
static_cast<double> (rect->h) / target_height
);
- subtitle->emit_image (period, image, scaled_rect);
+ subtitle->emit_image_start (from, image, scaled_rect);
}
void
-FFmpegDecoder::decode_ass_subtitle (string ass, ContentTimePeriod period)
+FFmpegDecoder::decode_ass_subtitle (string ass, ContentTime from)
{
/* We have no styles and no Format: line, so I'm assuming that FFmpeg
produces a single format of Dialogue: lines...
}
sub::RawSubtitle base;
- list<sub::RawSubtitle> raw = sub::SSAReader::parse_line (base, bits[9]);
+ list<sub::RawSubtitle> raw = sub::SSAReader::parse_line (
+ base,
+ bits[9],
+ _ffmpeg_content->video->size().width,
+ _ffmpeg_content->video->size().height
+ );
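+ /* collect() gathers the raw per-line pieces into complete subtitles */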
BOOST_FOREACH (sub::Subtitle const & i, sub::collect<list<sub::Subtitle> > (raw)) {
- subtitle->emit_text (period, i);
+ subtitle->emit_text_start (from, i);
}
}