#include "image.h"
#include "util.h"
#include "log.h"
+#include "dcpomatic_log.h"
#include "ffmpeg_decoder.h"
#include "text_decoder.h"
#include "ffmpeg_audio_stream.h"
#include "compose.hpp"
#include "text_content.h"
#include "audio_content.h"
+#include "frame_interval_checker.h"
#include <dcp/subtitle_string.h>
#include <sub/ssa_reader.h>
#include <sub/subtitle.h>
#include "i18n.h"
-#define LOG_GENERAL(...) dcpomatic_log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
-#define LOG_ERROR(...) dcpomatic_log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_ERROR);
-#define LOG_WARNING_NC(...) dcpomatic_log->log (__VA_ARGS__, LogEntry::TYPE_WARNING);
-#define LOG_WARNING(...) dcpomatic_log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_WARNING);
-
using std::cout;
using std::string;
using std::vector;
using boost::optional;
using boost::dynamic_pointer_cast;
using dcp::Size;
+using namespace dcpomatic;
FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
: FFmpeg (c)
+ , Decoder (film)
, _have_current_subtitle (false)
{
- if (c->video) {
+ if (c->video && c->video->use()) {
video.reset (new VideoDecoder (this, c));
_pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
		/* It doesn't matter what size or pixel format this is, it just needs to be black */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, dcp::Size (128, 128), true));
		_black_image->make_black ();
}
void
-FFmpegDecoder::flush (shared_ptr<const Film> film)
+FFmpegDecoder::flush ()
{
/* Get any remaining frames */
/* XXX: should we reset _packet.data and size after each *_decode_* call? */
- while (video && decode_video_packet(film)) {}
+ while (video && decode_video_packet()) {}
if (audio) {
- decode_audio_packet (film);
+ decode_audio_packet ();
}
/* Make sure all streams are the same length and round up to the next video frame */
- FrameRateChange const frc = film->active_frame_rate_change(_ffmpeg_content->position());
- ContentTime full_length (_ffmpeg_content->full_length(film), frc);
+ FrameRateChange const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
+ ContentTime full_length (_ffmpeg_content->full_length(film()), frc);
full_length = full_length.ceil (frc.source);
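+	/* e.g. 25fps source content lasting 1.01s is 25.25 frames, so full_length is
+	   rounded up to 26 frames (1.04s); video is then padded with black and audio
+	   with silence up to that point.
+	*/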
if (video) {
double const vfr = _ffmpeg_content->video_frame_rate().get();
Frame const f = full_length.frames_round (vfr);
- Frame v = video->position(film).frames_round (vfr) + 1;
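+		/* position() gives the time of the last frame that was emitted (if any),
+		   so the first black frame we need is the one after that.
+		*/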
+ Frame v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
while (v < f) {
- video->emit (film, shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)), v);
+ video->emit (film(), shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)), v);
++v;
}
}
BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, _ffmpeg_content->ffmpeg_audio_streams ()) {
- ContentTime a = audio->stream_position(film, i);
+ ContentTime a = audio->stream_position(film(), i);
/* Unfortunately if a is 0 that really means that we don't know the stream position since
there has been no data on it since the last seek. In this case we'll just do nothing
		   here. I'm not sure if that's the right idea.
		*/
ContentTime to_do = min (full_length - a, ContentTime::from_seconds (0.1));
shared_ptr<AudioBuffers> silence (new AudioBuffers (i->channels(), to_do.frames_ceil (i->frame_rate())));
silence->make_silent ();
- audio->emit (film, i, silence, a);
+ audio->emit (film(), i, silence, a);
a += to_do;
}
}
}
bool
-FFmpegDecoder::pass (shared_ptr<const Film> film)
+FFmpegDecoder::pass ()
{
+#ifdef DCPOMATIC_VARIANT_SWAROOP
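+	/* Encrypted content cannot be decoded without a KDM, so just behave as if
+	   this content has finished.
+	*/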
+ if (_ffmpeg_content->encrypted() && !_ffmpeg_content->kdm()) {
+ return true;
+ }
+#endif
+
int r = av_read_frame (_format_context, &_packet);
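+	/* av_read_frame returns 0 on success and a negative AVERROR code otherwise;
+	   AVERROR_EOF means there is nothing more to read.
+	*/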
	/* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
	   has pretty-much succeeded (and hence generated data which should be processed).
	*/
LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), &buf[0], r);
}
- flush (film);
+ flush ();
return true;
}
int const si = _packet.stream_index;
shared_ptr<const FFmpegContent> fc = _ffmpeg_content;
- if (_video_stream && si == _video_stream.get() && !video->ignore()) {
- decode_video_packet (film);
+ if (_video_stream && si == _video_stream.get() && video && !video->ignore()) {
+ decode_video_packet ();
} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
decode_subtitle_packet ();
} else {
- decode_audio_packet (film);
+ decode_audio_packet ();
}
av_packet_unref (&_packet);
{
DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));
+DCPOMATIC_DISABLE_WARNINGS
int const size = av_samples_get_buffer_size (
0, stream->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (stream), 1
);
+DCPOMATIC_ENABLE_WARNINGS
+
+ /* XXX: can't we just use _frame->nb_samples directly here? */
+ /* XXX: can't we use swr_convert() to do the format conversion? */
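+	/* A rough sketch of the swr_convert() idea (untested; a real version would keep
+	   one SwrContext per stream across calls, include <libswresample/swresample.h>
+	   and check for errors):
+
+	   SwrContext* swr = swr_alloc_set_opts (
+		   0,
+		   av_get_default_channel_layout (stream->channels()), AV_SAMPLE_FMT_FLTP, stream->frame_rate(),
+		   av_get_default_channel_layout (stream->channels()), audio_sample_format (stream), stream->frame_rate(),
+		   0, 0
+		   );
+	   swr_init (swr);
+	   swr_convert (
+		   swr,
+		   reinterpret_cast<uint8_t**> (audio->data()), frames,
+		   const_cast<uint8_t const **> (_frame->data), frames
+		   );
+	   swr_free (&swr);
+	*/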
/* Deinterleave and convert to float */
case AV_SAMPLE_FMT_FLTP:
{
float** p = reinterpret_cast<float**> (_frame->data);
+ DCPOMATIC_ASSERT (_frame->channels <= channels);
/* Sometimes there aren't as many channels in the _frame as in the stream */
for (int i = 0; i < _frame->channels; ++i) {
memcpy (data[i], p[i], frames * sizeof(float));
AVSampleFormat
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
{
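+	/* AVStream::codec is deprecated in newer FFmpeg (codecpar replaces it), hence
+	   the warning suppression here.
+	*/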
+DCPOMATIC_DISABLE_WARNINGS
return stream->stream (_format_context)->codec->sample_fmt;
+DCPOMATIC_ENABLE_WARNINGS
}
int
}
void
-FFmpegDecoder::seek (shared_ptr<const Film> film, ContentTime time, bool accurate)
+FFmpegDecoder::seek (ContentTime time, bool accurate)
{
- Decoder::seek (film, time, accurate);
+ Decoder::seek (time, accurate);
/* If we are doing an `accurate' seek, we need to use pre-roll, as
	   we don't really know what the seek will give us.
	*/
if (_video_stream) {
stream = _video_stream;
} else {
+ DCPOMATIC_ASSERT (_ffmpeg_content->audio);
shared_ptr<FFmpegAudioStream> s = dynamic_pointer_cast<FFmpegAudioStream> (_ffmpeg_content->audio->stream ());
if (s) {
stream = s->index (_format_context);
AVSEEK_FLAG_BACKWARD
);
+ {
+ /* Force re-creation of filter graphs to reset them and hence to make sure
+ they don't have any pre-seek frames knocking about.
+ */
+ boost::mutex::scoped_lock lm (_filter_graphs_mutex);
+ _filter_graphs.clear ();
+ }
+
if (video_codec_context ()) {
avcodec_flush_buffers (video_codec_context());
}
+DCPOMATIC_DISABLE_WARNINGS
BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, ffmpeg_content()->ffmpeg_audio_streams()) {
avcodec_flush_buffers (i->stream(_format_context)->codec);
}
+DCPOMATIC_ENABLE_WARNINGS
if (subtitle_codec_context ()) {
avcodec_flush_buffers (subtitle_codec_context ());
}
_have_current_subtitle = false;
+
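+	/* Forget any stored audio timestamps, since they no longer follow on from
+	   whatever we will decode next.
+	*/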
+ BOOST_FOREACH (optional<ContentTime>& i, _next_time) {
+ i = optional<ContentTime>();
+ }
}
void
-FFmpegDecoder::decode_audio_packet (shared_ptr<const Film> film)
+FFmpegDecoder::decode_audio_packet ()
{
/* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4
several times.
return;
}
+DCPOMATIC_DISABLE_WARNINGS
while (copy_packet.size > 0) {
int frame_finished;
shared_ptr<AudioBuffers> data = deinterleave_audio (*stream);
ContentTime ct;
- if (_frame->pts == AV_NOPTS_VALUE && _next_time[stream_index]) {
+ if (_frame->pts == AV_NOPTS_VALUE) {
/* In some streams we see not every frame coming through with a timestamp; for those
that have AV_NOPTS_VALUE we need to work out the timestamp ourselves. This is
particularly noticeable with TrueHD streams (see #1111).
*/
- ct = *_next_time[stream_index];
+ if (_next_time[stream_index]) {
+ ct = *_next_time[stream_index];
+ }
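+			/* XXX: if _next_time is also unknown, ct is left at zero here */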
} else {
			ct = ContentTime::from_seconds (
				av_frame_get_best_effort_timestamp (_frame) *
				av_q2d ((*stream)->stream(_format_context)->time_base))
				+ _pts_offset;
}
+DCPOMATIC_ENABLE_WARNINGS
/* Give this data provided there is some, and its time is sane */
if (ct >= ContentTime() && data->frames() > 0) {
- audio->emit (film, *stream, data, ct);
+ audio->emit (film(), *stream, data, ct);
}
}
}
bool
-FFmpegDecoder::decode_video_packet (shared_ptr<const Film> film)
+FFmpegDecoder::decode_video_packet ()
{
DCPOMATIC_ASSERT (_video_stream);
int frame_finished;
+DCPOMATIC_DISABLE_WARNINGS
if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
return false;
}
+DCPOMATIC_ENABLE_WARNINGS
boost::mutex::scoped_lock lm (_filter_graphs_mutex);
double const pts = i->second * av_q2d (_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds ();
video->emit (
- film,
+ film(),
shared_ptr<ImageProxy> (new RawImageProxy (image)),
- llrint(pts * _ffmpeg_content->active_video_frame_rate(film))
+ llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
);
} else {
LOG_WARNING_NC ("Dropping frame without PTS");
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
/* Start of the first line in the subtitle */
uint8_t* sub_p = rect->pict.data[0];
- /* sub_p looks up into a BGRA palette which is here
+ /* sub_p looks up into a BGRA palette which is at rect->pict.data[1];
(i.e. first byte B, second G, third R, fourth A)
*/
- uint32_t const * palette = (uint32_t *) rect->pict.data[1];
+ uint8_t const * palette = rect->pict.data[1];
#else
/* Start of the first line in the subtitle */
uint8_t* sub_p = rect->data[0];
- /* sub_p looks up into a BGRA palette which is here
- (i.e. first byte B, second G, third R, fourth A)
+ /* sub_p looks up into a BGRA palette which is at rect->data[1].
+ (first byte B, second G, third R, fourth A)
*/
- uint32_t const * palette = (uint32_t *) rect->data[1];
+ uint8_t const * palette = rect->data[1];
#endif
/* And the stream has a map of those palette colours to colours
chosen by the user; created a `mapped' palette from those settings.
map<RGBA, RGBA> colour_map = ffmpeg_content()->subtitle_stream()->colours ();
vector<RGBA> mapped_palette (rect->nb_colors);
for (int i = 0; i < rect->nb_colors; ++i) {
- RGBA c ((palette[i] & 0xff0000) >> 16, (palette[i] & 0xff00) >> 8, palette[i] & 0xff, (palette[i] & 0xff000000) >> 24);
+ RGBA c (palette[2], palette[1], palette[0], palette[3]);
map<RGBA, RGBA>::const_iterator j = colour_map.find (c);
if (j != colour_map.end ()) {
mapped_palette[i] = j->second;
*/
mapped_palette[i] = c;
}
+ palette += 4;
}
/* Start of the output data */
- uint32_t* out_p = (uint32_t *) image->data()[0];
+ uint8_t* out_p = image->data()[0];
for (int y = 0; y < rect->h; ++y) {
uint8_t* sub_line_p = sub_p;
- uint32_t* out_line_p = out_p;
+ uint8_t* out_line_p = out_p;
for (int x = 0; x < rect->w; ++x) {
RGBA const p = mapped_palette[*sub_line_p++];
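+				/* Write the BGRA output a byte at a time, so the result does
+				   not depend on the machine's endianness.
+				*/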
- /* XXX: this seems to be wrong to me (isn't the output image BGRA?) but it looks right on screen */
- *out_line_p++ = (p.a << 24) | (p.b << 16) | (p.g << 8) | p.r;
+ *out_line_p++ = p.b;
+ *out_line_p++ = p.g;
+ *out_line_p++ = p.r;
+ *out_line_p++ = p.a;
}
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
sub_p += rect->pict.linesize[0];
#else
sub_p += rect->linesize[0];
#endif
- out_p += image->stride()[0] / sizeof (uint32_t);
+ out_p += image->stride()[0];
}
- int const target_width = subtitle_codec_context()->width;
- int const target_height = subtitle_codec_context()->height;
+ int target_width = subtitle_codec_context()->width;
+ if (target_width == 0 && video_codec_context()) {
+ /* subtitle_codec_context()->width == 0 has been seen in the wild but I don't
+ know if it's supposed to mean something from FFmpeg's point of view.
+ */
+ target_width = video_codec_context()->width;
+ }
+ int target_height = subtitle_codec_context()->height;
+ if (target_height == 0 && video_codec_context()) {
+ target_height = video_codec_context()->height;
+ }
+ DCPOMATIC_ASSERT (target_width);
+ DCPOMATIC_ASSERT (target_height);
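+	/* Express the rectangle as proportions of the video frame, so that it is
+	   independent of the frame's size in pixels.
+	*/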
dcpomatic::Rect<double> const scaled_rect (
static_cast<double> (rect->x) / target_width,
static_cast<double> (rect->y) / target_height,