#include "ffmpeg_content.h"
#include "raw_image_proxy.h"
#include "film.h"
-#include "timer.h"
+#include "compose.hpp"
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <boost/foreach.hpp>
-#include <stdexcept>
#include <vector>
#include <iomanip>
#include <iostream>
#include <stdint.h>
-#include <sndfile.h>
#include "i18n.h"
-#define LOG_GENERAL(...) _video_content->film()->log()->log (String::compose (__VA_ARGS__), Log::TYPE_GENERAL);
-#define LOG_ERROR(...) _video_content->film()->log()->log (String::compose (__VA_ARGS__), Log::TYPE_ERROR);
-#define LOG_WARNING_NC(...) _video_content->film()->log()->log (__VA_ARGS__, Log::TYPE_WARNING);
-#define LOG_WARNING(...) _video_content->film()->log()->log (String::compose (__VA_ARGS__), Log::TYPE_WARNING);
+#define LOG_GENERAL(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
+#define LOG_ERROR(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_ERROR);
+#define LOG_WARNING_NC(...) _log->log (__VA_ARGS__, LogEntry::TYPE_WARNING);
+#define LOG_WARNING(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_WARNING);
using std::cout;
-using std::string;
using std::vector;
using std::list;
using std::min;
using std::pair;
-using std::make_pair;
using std::max;
using boost::shared_ptr;
-using boost::optional;
-using boost::dynamic_pointer_cast;
using dcp::Size;
-FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log)
+FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log, bool fast)
: VideoDecoder (c)
- , AudioDecoder (c)
+ , AudioDecoder (c, fast)
, SubtitleDecoder (c)
, FFmpeg (c)
, _log (log)
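+	/* _pts_offset is added to every PTS we see, so that the content
+	   starts at time 0 and the first video frame falls exactly on a
+	   frame boundary; see pts_offset() for the details.
+	*/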
+ , _pts_offset (pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->video_frame_rate()))
{
- /* Audio and video frame PTS values may not start with 0. We want
- to fiddle them so that:
- 1. One of them starts at time 0.
- 2. The first video PTS value ends up on a frame boundary.
-
- Then we remove big initial gaps in PTS and we allow our
- insertion of black frames to work.
-
- We will do:
- audio_pts_to_use = audio_pts_from_ffmpeg + pts_offset;
- video_pts_to_use = video_pts_from_ffmpeg + pts_offset;
- */
-
- /* First, make one of them start at 0 */
-
- vector<shared_ptr<FFmpegAudioStream> > streams = c->ffmpeg_audio_streams ();
-
- _pts_offset = ContentTime::min ();
-
- if (c->first_video ()) {
- _pts_offset = - c->first_video().get ();
- }
-
- BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, streams) {
- if (i->first_audio) {
- _pts_offset = max (_pts_offset, - i->first_audio.get ());
- }
- }
-
- /* If _pts_offset is positive we would be pushing things from a -ve PTS to be played.
- I don't think we ever want to do that, as it seems things at -ve PTS are not meant
- to be seen (use for alignment bars etc.); see mantis #418.
- */
- if (_pts_offset > ContentTime ()) {
- _pts_offset = ContentTime ();
- }
-
- /* Now adjust so that the video pts starts on a frame */
- if (c->first_video ()) {
- ContentTime first_video = c->first_video().get() + _pts_offset;
- ContentTime const old_first_video = first_video;
- _pts_offset += first_video.round_up (c->video_frame_rate ()) - old_first_video;
- }
}
void
FFmpegDecoder::flush ()
{
/* Get any remaining frames */
-
+
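+	/* A packet with null data and zero size asks the codecs to return
+	   any frames they have buffered */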
_packet.data = 0;
_packet.size = 0;
-
+
/* XXX: should we reset _packet.data and size after each *_decode_* call? */
-
+
while (decode_video_packet ()) {}
-
+
decode_audio_packet ();
AudioDecoder::flush ();
}
bool
-FFmpegDecoder::pass (PassReason reason)
+FFmpegDecoder::pass (PassReason reason, bool accurate)
{
int r = av_read_frame (_format_context, &_packet);

if (r < 0) {
	if (r != AVERROR_EOF) {
		char buf[256];
		av_strerror (r, buf, sizeof(buf));
		LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), buf, r);
	}
-
+
flush ();
return true;
}
int const si = _packet.stream_index;
shared_ptr<const FFmpegContent> fc = _ffmpeg_content;
- if (si == _video_stream && !_ignore_video && reason != PASS_REASON_SUBTITLE) {
+ if (si == _video_stream && !_ignore_video && (accurate || reason != PASS_REASON_SUBTITLE)) {
decode_video_packet ();
} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index (_format_context, si)) {
decode_subtitle_packet ();
- } else if (reason != PASS_REASON_SUBTITLE) {
+ } else if (accurate || reason != PASS_REASON_SUBTITLE) {
decode_audio_packet ();
}
}
}
break;
-
+
case AV_SAMPLE_FMT_S16:
{
int16_t* p = reinterpret_cast<int16_t *> (data[0]);
}
}
break;
-
+
case AV_SAMPLE_FMT_S32:
{
int32_t* p = reinterpret_cast<int32_t *> (data[0]);
}
}
break;
-
+
case AV_SAMPLE_FMT_FLTP:
{
float** p = reinterpret_cast<float**> (data);
/* XXX: it seems debatable whether PTS should be used here...
http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
*/
-
- ContentTime const u = time - _pts_offset;
- av_seek_frame (_format_context, _video_stream, u.seconds() / av_q2d (_format_context->streams[_video_stream]->time_base), 0);
+
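+	/* Never seek to a negative timestamp, and use AVSEEK_FLAG_BACKWARD
+	   so that we land on a key frame at or before the one we want.
+	*/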
+ ContentTime u = time - _pts_offset;
+ if (u < ContentTime ()) {
+ u = ContentTime ();
+ }
+ av_seek_frame (_format_context, _video_stream, u.seconds() / av_q2d (_format_context->streams[_video_stream]->time_base), AVSEEK_FLAG_BACKWARD);
avcodec_flush_buffers (video_codec_context());
/* XXX: should we be flushing audio buffers? */
-
+
if (subtitle_codec_context ()) {
avcodec_flush_buffers (subtitle_codec_context ());
}
/* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4
several times.
*/
-
+
AVPacket copy_packet = _packet;
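+	/* Take a shallow copy so we can advance data/size as the decoder
+	   consumes bytes without touching the original packet */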
/* XXX: inefficient */
/* The packet's stream may not be an audio one; just ignore it in this method if so */
return;
}
-
+
while (copy_packet.size > 0) {
	int frame_finished;
	int decode_result = avcodec_decode_audio4 (audio_codec_context(), _frame, &frame_finished, &copy_packet);
	if (decode_result < 0) {
		LOG_WARNING (N_("avcodec_decode_audio4 failed (%1)"), decode_result);
		return;
	}
if (frame_finished) {
- ContentTime const ct = ContentTime::from_seconds (
+ ContentTime ct = ContentTime::from_seconds (
av_frame_get_best_effort_timestamp (_frame) *
av_q2d ((*stream)->stream (_format_context)->time_base))
+ _pts_offset;
-
+
int const data_size = av_samples_get_buffer_size (
0, (*stream)->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (*stream), 1
);
- audio (*stream, deinterleave_audio (*stream, _frame->data, data_size), ct);
+ shared_ptr<AudioBuffers> data = deinterleave_audio (*stream, _frame->data, data_size);
+
+ if (ct < ContentTime ()) {
+ /* Discard audio data that comes before time 0 */
+ Frame const remove = min (int64_t (data->frames()), (-ct).frames_ceil(double((*stream)->frame_rate ())));
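+				/* Shift what remains to the start of the buffer and move
+				   ct forward to the new first sample */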
+ data->move (remove, 0, data->frames() - remove);
+ data->set_frames (data->frames() - remove);
+ ct += ContentTime::from_frames (remove, (*stream)->frame_rate ());
+ }
+
+ if (data->frames() > 0) {
+ audio (*stream, data, ct);
+ }
}
-
+
copy_packet.data += decode_result;
copy_packet.size -= decode_result;
}
boost::mutex::scoped_lock lm (_filter_graphs_mutex);
shared_ptr<FilterGraph> graph;
-
+
list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
++i;
for (list<pair<shared_ptr<Image>, int64_t> >::iterator i = images.begin(); i != images.end(); ++i) {
shared_ptr<Image> image = i->first;
-
+
if (i->second != AV_NOPTS_VALUE) {
double const pts = i->second * av_q2d (_format_context->streams[_video_stream]->time_base) + _pts_offset.seconds ();
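+			/* llrint gives us a 64-bit frame index directly */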
video (
shared_ptr<ImageProxy> (new RawImageProxy (image)),
- rint (pts * _ffmpeg_content->video_frame_rate ())
+ llrint (pts * _ffmpeg_content->video_frame_rate ())
);
} else {
			LOG_WARNING_NC ("Dropping frame without PTS");
		}
	}

	return true;
}
-
+
void
FFmpegDecoder::decode_subtitle_packet ()
{
if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, &_packet) < 0 || !got_subtitle) {
return;
}
-
+
if (sub.num_rects <= 0) {
/* Sometimes we get an empty AVSubtitle, which is used by some codecs to
indicate that the previous subtitle should stop. We can ignore it here.
*/
return;
- } else if (sub.num_rects > 1) {
- throw DecodeError (_("multi-part subtitles not yet supported"));
}
/* Subtitle PTS (within the source, not taking into account any of the
   source that we may have chopped off for the DCP) */
if (sub_period.to) {
	period.to = sub_period.to.get() + _pts_offset;
} else {
/* We have to look up the `to' time in the stream's records */
- period.to = ffmpeg_content()->subtitle_stream()->find_subtitle_to (sub_period.from);
+ period.to = ffmpeg_content()->subtitle_stream()->find_subtitle_to (period.from);
}
-
- AVSubtitleRect const * rect = sub.rects[0];
-
- switch (rect->type) {
- case SUBTITLE_NONE:
- break;
- case SUBTITLE_BITMAP:
- decode_bitmap_subtitle (rect, period);
- break;
- case SUBTITLE_TEXT:
- cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
- break;
- case SUBTITLE_ASS:
- cout << "XXX: SUBTITLE_ASS " << rect->ass << "\n";
- break;
+
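+	/* A subtitle may contain several rects (multi-part subtitles);
+	   handle each one in turn */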
+ for (unsigned int i = 0; i < sub.num_rects; ++i) {
+ AVSubtitleRect const * rect = sub.rects[i];
+
+ switch (rect->type) {
+ case SUBTITLE_NONE:
+ break;
+ case SUBTITLE_BITMAP:
+ decode_bitmap_subtitle (rect, period);
+ break;
+ case SUBTITLE_TEXT:
+ cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
+ break;
+ case SUBTITLE_ASS:
+ cout << "XXX: SUBTITLE_ASS " << rect->ass << "\n";
+ break;
+ }
}
-
+
avsubtitle_free (&sub);
}
/* Note RGBA is expressed little-endian, so the first byte in the word is R, second
G, third B, fourth A.
*/
- shared_ptr<Image> image (new Image (PIX_FMT_RGBA, dcp::Size (rect->w, rect->h), true));
-
+ shared_ptr<Image> image (new Image (AV_PIX_FMT_RGBA, dcp::Size (rect->w, rect->h), true));
+
/* Start of the first line in the subtitle */
uint8_t* sub_p = rect->pict.data[0];
/* sub_p looks up into a BGRA palette which is here
   (i.e. first byte B, second G, third R, fourth A) */
uint32_t const * palette = (uint32_t *) rect->pict.data[1];
/* Start of the output data */
uint32_t* out_p = (uint32_t *) image->data()[0];
-
+
for (int y = 0; y < rect->h; ++y) {
uint8_t* sub_line_p = sub_p;
uint32_t* out_line_p = out_p;
		for (int x = 0; x < rect->w; ++x) {
			/* Look the pixel up in the BGRA palette and swap to RGBA */
			uint32_t const p = palette[*sub_line_p++];
			*out_line_p++ = ((p & 0xff) << 16) | (p & 0xff00) | ((p & 0xff0000) >> 16) | (p & 0xff000000);
		}
sub_p += rect->pict.linesize[0];
out_p += image->stride()[0] / sizeof (uint32_t);
}
-
+
dcp::Size const vs = _ffmpeg_content->video_size ();
dcpomatic::Rect<double> const scaled_rect (
static_cast<double> (rect->x) / vs.width,
static_cast<double> (rect->y) / vs.height,
static_cast<double> (rect->w) / vs.width,
static_cast<double> (rect->h) / vs.height
);
-
+
image_subtitle (period, image, scaled_rect);
}
-