#include <iomanip>
#include <iostream>
#include <stdint.h>
+#include <boost/lexical_cast.hpp>
extern "C" {
#include <tiffio.h>
#include <libavcodec/avcodec.h>
#include "transcoder.h"
#include "job.h"
#include "filter.h"
-#include "film_state.h"
#include "options.h"
#include "exceptions.h"
#include "image.h"
#include "util.h"
#include "log.h"
#include "ffmpeg_decoder.h"
+#include "subtitle.h"
using namespace std;
using namespace boost;
-FFmpegDecoder::FFmpegDecoder (boost::shared_ptr<const FilmState> s, boost::shared_ptr<const Options> o, Job* j, Log* l, bool minimal, bool ignore_length)
- : Decoder (s, o, j, l, minimal, ignore_length)
+FFmpegDecoder::FFmpegDecoder (boost::shared_ptr<Film> f, boost::shared_ptr<const Options> o, Job* j, bool minimal, bool ignore_length)
+ : Decoder (f, o, j, minimal, ignore_length)
, _format_context (0)
, _video_stream (-1)
, _audio_stream (-1)
+ , _subtitle_stream (-1)
, _frame (0)
, _video_codec_context (0)
, _video_codec (0)
, _audio_codec_context (0)
, _audio_codec (0)
+ , _subtitle_codec_context (0)
+ , _subtitle_codec (0)
{
setup_general ();
setup_video ();
setup_audio ();
+ setup_subtitle ();
}
/* Destructor: close the codec contexts that the setup_* methods opened and
   free libav resources.
   NOTE(review): patch residue — this function's opening brace and some
   context lines (probably including an avcodec_close of
   _audio_codec_context) are elided from this view; reconcile against the
   full file before building. */
FFmpegDecoder::~FFmpegDecoder ()
	if (_video_codec_context) {
		avcodec_close (_video_codec_context);
	}
+
+	/* Close the subtitle codec context if setup_subtitle() opened one */
+	if (_subtitle_codec_context) {
+		avcodec_close (_subtitle_codec_context);
+	}
	/* _frame is freed with av_free; presumably it was allocated with
	   avcodec_alloc_frame() in elided setup code — confirm. */
	av_free (_frame);
	avformat_close_input (&_format_context);
	/* NOTE(review): patch residue — the signature of setup_general() (and its
	   opening brace, plus the declaration of `r`) is elided above this line. */
	av_register_all ();
-	if ((r = avformat_open_input (&_format_context, _fs->content_path().c_str(), 0, 0)) != 0) {
-		throw OpenFileError (_fs->content_path ());
+	if ((r = avformat_open_input (&_format_context, _film->content_path().c_str(), 0, 0)) != 0) {
+		throw OpenFileError (_film->content_path ());
	}
	if (avformat_find_stream_info (_format_context, 0) < 0) {
		throw DecodeError ("could not find stream information");
	}
+	/* Find video, audio and subtitle streams and choose the first of each */
+
	for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
-		if (_format_context->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
+		AVStream* s = _format_context->streams[i];
+		if (s->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			_video_stream = i;
-		} else if (_format_context->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
-			_audio_stream = i;
+		} else if (s->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
+			if (_audio_stream == -1) {
+				_audio_stream = i;
+			}
+			/* Record every audio stream so the UI can offer a choice */
+			_audio_streams.push_back (AudioStream (stream_name (s), i, s->codec->channels));
+		} else if (s->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
+			if (_subtitle_stream == -1) {
+				_subtitle_stream = i;
+			}
+			_subtitle_streams.push_back (SubtitleStream (stream_name (s), i));
		}
	}
+	/* Now override audio and subtitle streams with those from the Film, if it has any */
+
+	/* NOTE(review): the streams were recorded above using their *index* i, but
+	   here the Film's stream .id() is assigned to _audio_stream/_subtitle_stream,
+	   which are used elsewhere as indices into _format_context->streams[].
+	   Confirm that id() really returns the stream index and not AVStream::id. */
+	if (_film->audio_stream_index() != -1) {
+		_audio_stream = _film->audio_stream().id();
+	}
+
+	if (_film->subtitle_stream_index() != -1) {
+		_subtitle_stream = _film->subtitle_stream().id ();
+	}
+
	if (_video_stream < 0) {
		throw DecodeError ("could not find video stream");
	}
}
}
+void
+FFmpegDecoder::setup_subtitle ()
+{
+ if (_subtitle_stream < 0) {
+ return;
+ }
+
+ _subtitle_codec_context = _format_context->streams[_subtitle_stream]->codec;
+ _subtitle_codec = avcodec_find_decoder (_subtitle_codec_context->codec_id);
+
+ if (_subtitle_codec == 0) {
+ throw DecodeError ("could not find subtitle decoder");
+ }
+
+ if (avcodec_open2 (_subtitle_codec_context, _subtitle_codec, 0) < 0) {
+ throw DecodeError ("could not open subtitle decoder");
+ }
+}
+
+
/** Read and decode one packet from the content, passing the results to
 *  process_video() / process_audio() / process_subtitle().
 *
 *  @return true if the end of the content has been reached, otherwise false.
 *
 *  NOTE(review): this span is residue from two merged diff hunks — several
 *  braces and statements (e.g. the `}` after the av_read_frame error throw,
 *  and the avcodec_decode_audio4 call in the audio branch) have been elided,
 *  so the structure below does not balance as shown.  Reconcile against the
 *  full file.
 */
bool
FFmpegDecoder::do_pass ()
{
	int r = av_read_frame (_format_context, &_packet);
+	
	if (r < 0) {
		if (r != AVERROR_EOF) {
			throw DecodeError ("error on av_read_frame");
		/* End of stream: send a zero-sized packet to flush any frames
		   still buffered inside the decoder.
		   NOTE(review): the closing brace of the throw's `if` above is
		   elided from this view. */
		_packet.data = 0;
		_packet.size = 0;
+	/* XXX: should we reset _packet.data and size after each *_decode_* call? */
+
		int frame_finished;
		while (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
			int const data_size = av_samples_get_buffer_size (
				0, _audio_codec_context->channels, _frame->nb_samples, audio_sample_format (), 1
				);
-			
-			assert (_audio_codec_context->channels == _fs->audio_channels);
+			
+			assert (_audio_codec_context->channels == _film->audio_channels());
			process_audio (_frame->data[0], data_size);
		}
	}
	return true;
}
	/* Convert this packet's PTS to seconds using its stream's time base */
+	double const pts_seconds = av_q2d (_format_context->streams[_packet.stream_index]->time_base) * _packet.pts;
+
	if (_packet.stream_index == _video_stream) {
+		if (!_first_video) {
+			_first_video = pts_seconds;
+		}
+
		int frame_finished;
		if (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
			process_video (_frame);
		}
-	} else if (_audio_stream >= 0 && _packet.stream_index == _audio_stream && _opt->decode_audio) {
+	} else if (_audio_stream >= 0 && _packet.stream_index == _audio_stream && _opt->decode_audio && _first_video && _first_video.get() <= pts_seconds) {
+
+		/* Note: We only decode audio if we've had our first video packet through, and if it
+		   was before this packet.  Until then audio is thrown away.
+		*/
+
+		if (!_first_audio) {
+			_first_audio = pts_seconds;
+
+			/* This is our first audio packet, and if we've arrived here we must have had our
+			   first video packet.  Push some silence to make up the gap between our first
+			   video packet and our first audio.
+			*/
+
+			/* frames of silence that we must push */
+			int const s = rint ((_first_audio.get() - _first_video.get()) * audio_sample_rate ());
+
+			_log->log (
+				String::compose (
+					"First video at %1, first audio at %2, pushing %3 frames of silence for %4 channels (%5 bytes per sample)",
+					_first_video.get(), _first_audio.get(), s, audio_channels(), bytes_per_audio_sample()
+					)
+				);
+
+			/* hence bytes */
+			int const b = s * audio_channels() * bytes_per_audio_sample();
+
+			/* XXX: this assumes that it won't be too much, and there are shaky assumptions
+			   that all sound representations are silent with memset()ed zero data.
+			*/
+			/* NOTE(review): variable-length arrays are a compiler extension, and a
+			   large A/V gap makes this a large stack allocation — consider a heap
+			   buffer (e.g. std::vector<uint8_t>). */
+			uint8_t silence[b];
+			memset (silence, 0, b);
+			process_audio (silence, b);
+		}
		/* NOTE(review): the avcodec_decode_audio4 call that fills _frame is
		   elided from this view between here and av_samples_get_buffer_size. */
		avcodec_get_frame_defaults (_frame);
		int const data_size = av_samples_get_buffer_size (
			0, _audio_codec_context->channels, _frame->nb_samples, audio_sample_format (), 1
			);
-		
-		assert (_audio_codec_context->channels == _fs->audio_channels);
+		
+		assert (_audio_codec_context->channels == _film->audio_channels());
		process_audio (_frame->data[0], data_size);
	}
+
+	} else if (_subtitle_stream >= 0 && _packet.stream_index == _subtitle_stream && _opt->decode_subtitles && _first_video) {
+
+		int got_subtitle;
+		AVSubtitle sub;
+		/* NOTE(review): avcodec_decode_subtitle2 returns the number of bytes
+		   used or a *negative* error code; a negative return is truthy here,
+		   so errors are treated as success.  Should be `>= 0 && got_subtitle`. */
+		if (avcodec_decode_subtitle2 (_subtitle_codec_context, &sub, &got_subtitle, &_packet) && got_subtitle) {
+			/* Sometimes we get an empty AVSubtitle, which is used by some codecs to
+			   indicate that the previous subtitle should stop.
+			*/
+			if (sub.num_rects > 0) {
+				process_subtitle (shared_ptr<TimedSubtitle> (new TimedSubtitle (sub, _first_video.get())));
+			} else {
+				process_subtitle (shared_ptr<TimedSubtitle> ());
+			}
+			avsubtitle_free (&sub);
+		}
	}
	av_free_packet (&_packet);
	return false;
}
/* NOTE(review): length_in_frames() is deleted by this patch (all lines are
   `-`-marked).  For the record, the old implementation also truncated:
   `_format_context->duration / AV_TIME_BASE` is integer division, discarding
   the fractional seconds before multiplying by the frame rate. */
-int
-FFmpegDecoder::length_in_frames () const
-{
-	return (_format_context->duration / AV_TIME_BASE) * frames_per_second ();
-}
-
float
FFmpegDecoder::frames_per_second () const
{
return _video_codec_context->sample_aspect_ratio.den;
}
+bool
+FFmpegDecoder::has_subtitles () const
+{
+ return (_subtitle_stream != -1);
+}
+
+vector<AudioStream>
+FFmpegDecoder::audio_streams () const
+{
+ return _audio_streams;
+}
+
+vector<SubtitleStream>
+FFmpegDecoder::subtitle_streams () const
+{
+ return _subtitle_streams;
+}
+
+string
+FFmpegDecoder::stream_name (AVStream* s) const
+{
+ stringstream n;
+
+ AVDictionaryEntry const * lang = av_dict_get (s->metadata, "language", 0, 0);
+ if (lang) {
+ n << lang->value;
+ }
+
+ AVDictionaryEntry const * title = av_dict_get (s->metadata, "title", 0, 0);
+ if (title) {
+ if (!n.str().empty()) {
+ n << " ";
+ }
+ n << title->value;
+ }
+
+ if (n.str().empty()) {
+ n << "unknown";
+ }
+
+ return n.str ();
+}
+