2 Copyright (C) 2013-2015 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
22 #include <libavcodec/avcodec.h>
23 #include <libavformat/avformat.h>
24 #include <libavutil/pixfmt.h>
25 #include <libavutil/pixdesc.h>
27 #include "ffmpeg_examiner.h"
28 #include "ffmpeg_content.h"
30 #include "ffmpeg_audio_stream.h"
31 #include "ffmpeg_subtitle_stream.h"
33 #include <locked_sstream.h>
34 #include <boost/foreach.hpp>
42 using boost::shared_ptr;
43 using boost::optional;
45 /** @param job job that the examiner is operating in, or 0 */
/* Constructor: scans the FFmpeg format context for audio/subtitle streams,
 * works out the video length (from the header if possible, otherwise by
 * decoding), and notes first-video/first-audio times and subtitle periods.
 * NOTE(review): this chunk is elided — initializer-list head, some closing
 * braces and intervening lines are not visible here.
 */
46 FFmpegExaminer::FFmpegExaminer (shared_ptr<const FFmpegContent> c, shared_ptr<Job> job)
49 , _need_video_length (false)
51 /* Find audio and subtitle streams */
53 for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
54 AVStream* s = _format_context->streams[i];
55 if (s->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
57 /* This is a hack; sometimes it seems that _audio_codec_context->channel_layout isn't set up,
58 so bodge it here. No idea why we should have to do this.
61 if (s->codec->channel_layout == 0) {
/* Derive a default layout from the channel count when FFmpeg left it unset */
62 s->codec->channel_layout = av_get_default_channel_layout (s->codec->channels);
/* The audio-stream length below is computed from the container duration,
   so it must be valid here */
65 DCPOMATIC_ASSERT (_format_context->duration != AV_NOPTS_VALUE);
66 DCPOMATIC_ASSERT (s->codec->codec);
67 DCPOMATIC_ASSERT (s->codec->codec->name);
69 _audio_streams.push_back (
70 shared_ptr<FFmpegAudioStream> (
71 new FFmpegAudioStream (
73 s->codec->codec->name,
75 s->codec->sample_rate,
/* Length in samples: container duration (in AV_TIME_BASE units) scaled to the sample rate */
76 (double (_format_context->duration) / AV_TIME_BASE) * s->codec->sample_rate,
82 } else if (s->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
83 _subtitle_streams.push_back (shared_ptr<FFmpegSubtitleStream> (new FFmpegSubtitleStream (subtitle_stream_name (s), s->id)));
88 /* See if the header has duration information in it */
89 _need_video_length = _format_context->duration == AV_NOPTS_VALUE;
90 if (!_need_video_length) {
/* Header gave us a duration; convert it to video frames */
91 _video_length = (double (_format_context->duration) / AV_TIME_BASE) * video_frame_rate().get ();
/* Pick a job sub-task name describing what the decode pass below will do */
96 if (_need_video_length) {
97 job->sub (_("Finding length and subtitles"));
98 } else if (!_subtitle_streams.empty()) {
99 job->sub (_("Finding subtitles"));
101 job->sub (_("Finding length"));
105 /* Run through until we find:
107 * - the first audio for each stream.
108 * - the subtitle periods for each stream.
110 * We have to note subtitle periods as otherwise we have no way of knowing
111 * where we should look for subtitles (video and audio are always present,
115 int64_t const len = _file_group.length ();
117 int r = av_read_frame (_format_context, &_packet);
/* Report progress from the byte position in the input when we can */
124 job->set_progress (float (_format_context->pb->pos) / len);
126 job->set_progress_unknown ();
130 AVCodecContext* context = _format_context->streams[_packet.stream_index]->codec;
132 if (_video_stream && _packet.stream_index == _video_stream.get()) {
133 video_packet (context);
136 bool got_all_audio = true;
138 for (size_t i = 0; i < _audio_streams.size(); ++i) {
139 if (_audio_streams[i]->uses_index (_format_context, _packet.stream_index)) {
140 audio_packet (context, _audio_streams[i]);
142 if (!_audio_streams[i]->first_audio) {
143 got_all_audio = false;
147 for (size_t i = 0; i < _subtitle_streams.size(); ++i) {
148 if (_subtitle_streams[i]->uses_index (_format_context, _packet.stream_index)) {
149 subtitle_packet (context, _subtitle_streams[i]);
/* Release the packet buffer each iteration to avoid leaking */
153 av_packet_unref (&_packet);
/* If there are no subtitle streams we can stop as soon as we have
   first video and first audio for every stream */
155 if (_first_video && got_all_audio && _subtitle_streams.empty ()) {
161 /* Finish off any hanging subtitles at the end */
162 for (LastSubtitleMap::const_iterator i = _last_subtitle_start.begin(); i != _last_subtitle_start.end(); ++i) {
164 if (i->second->image) {
/* Close the open image subtitle at the end of the video */
165 i->first->add_image_subtitle (
169 ContentTime::from_frames (video_length(), video_frame_rate().get_value_or (24))
173 i->first->add_text_subtitle (
177 ContentTime::from_frames (video_length(), video_frame_rate().get_value_or (24))
184 /* We just added subtitles to our streams without taking the PTS offset into account;
185 this is because we might not know the PTS offset when the first subtitle is seen.
186 Now we know the PTS offset so we can apply it to those subtitles.
188 if (has_video() && video_frame_rate()) {
189 BOOST_FOREACH (shared_ptr<FFmpegSubtitleStream> i, _subtitle_streams) {
190 i->add_offset (pts_offset (_audio_streams, _first_video, video_frame_rate().get()));
/* Decode one video packet; record the first video frame's time and, if the
 * header had no duration, keep updating _video_length from each decoded frame.
 * NOTE(review): chunk is elided — return type and some braces are not visible.
 */
196 FFmpegExaminer::video_packet (AVCodecContext* context)
198 DCPOMATIC_ASSERT (_video_stream);
/* Nothing left to learn: we have first video and the header told us the length */
200 if (_first_video && !_need_video_length) {
205 if (avcodec_decode_video2 (context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
207 _first_video = frame_time (_format_context->streams[_video_stream.get()]);
209 if (_need_video_length) {
/* Track length as the frame count of the latest decoded frame's time */
210 _video_length = frame_time (
211 _format_context->streams[_video_stream.get()]
212 ).get_value_or (ContentTime ()).frames_round (video_frame_rate().get ());
/* Decode one audio packet for `stream' and record the time of its first
 * audio, if not already seen.
 */
218 FFmpegExaminer::audio_packet (AVCodecContext* context, shared_ptr<FFmpegAudioStream> stream)
/* Already have the first audio time for this stream; nothing to do */
220 if (stream->first_audio) {
225 if (avcodec_decode_audio4 (context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
226 stream->first_audio = frame_time (stream->stream (_format_context));
/* Decode one subtitle packet and update `stream' with subtitle periods.
 * Subtitles may arrive with or without an explicit end time, so we track a
 * possibly-open subtitle per stream in _last_subtitle_start and close it when
 * the next packet (or end of file) arrives.
 * NOTE(review): chunk is elided — some braces/else lines are not visible.
 */
231 FFmpegExaminer::subtitle_packet (AVCodecContext* context, shared_ptr<FFmpegSubtitleStream> stream)
235 if (avcodec_decode_subtitle2 (context, &sub, &frame_finished, &_packet) >= 0 && frame_finished) {
236 string id = subtitle_id (sub);
237 FFmpegSubtitlePeriod const period = subtitle_period (sub);
238 bool const starts_image = subtitle_starts_image (sub);
240 LastSubtitleMap::iterator last = _last_subtitle_start.find (stream);
241 if (last != _last_subtitle_start.end() && last->second) {
242 /* We have seen the start of a subtitle but not yet the end. Whatever this is
243 finishes the previous subtitle, so add it */
244 if (last->second->image) {
245 stream->add_image_subtitle (last->second->id, ContentTimePeriod (last->second->time, period.from));
247 stream->add_text_subtitle (last->second->id, ContentTimePeriod (last->second->time, period.from));
249 if (sub.num_rects == 0) {
250 /* This is a `proper' end-of-subtitle */
251 _last_subtitle_start[stream] = optional<SubtitleStart> ();
253 /* This is just another subtitle, so we start again */
254 _last_subtitle_start[stream] = SubtitleStart (id, starts_image, period.from);
/* A single-rect subtitle: if it carries its own end time we can add it
   immediately; otherwise remember it as an open subtitle */
256 } else if (sub.num_rects == 1) {
259 stream->add_image_subtitle (id, ContentTimePeriod (period.from, period.to.get ()));
261 stream->add_text_subtitle (id, ContentTimePeriod (period.from, period.to.get ()));
264 _last_subtitle_start[stream] = SubtitleStart (id, starts_image, period.from);
/* Collect palette colours from any bitmap rects so they can be remapped later */
268 for (unsigned int i = 0; i < sub.num_rects; ++i) {
269 if (sub.rects[i]->type == SUBTITLE_BITMAP) {
270 #ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
/* Older FFmpeg: palette lives in rect->pict.data[1] as packed ARGB words */
271 uint32_t* palette = (uint32_t *) sub.rects[i]->pict.data[1];
272 for (int j = 0; j < sub.rects[i]->nb_colors; ++j) {
274 (palette[j] & 0x00ff0000) >> 16,
275 (palette[j] & 0x0000ff00) >> 8,
276 (palette[j] & 0x000000ff) >> 0,
277 (palette[j] & 0xff000000) >> 24
/* Initially map each palette colour to itself */
280 stream->set_colour (rgba, rgba);
/* Newer FFmpeg: palette lives in rect->data[1] instead of rect->pict */
283 uint32_t* palette = (uint32_t *) sub.rects[i]->data[1];
284 for (int j = 0; j < sub.rects[i]->nb_colors; ++j) {
286 (palette[j] & 0x00ff0000) >> 16,
287 (palette[j] & 0x0000ff00) >> 8,
288 (palette[j] & 0x000000ff) >> 0,
289 (palette[j] & 0xff000000) >> 24
292 stream->set_colour (rgba, rgba);
/* Free the decoded subtitle's rects/buffers */
298 avsubtitle_free (&sub);
/* @return Time of the frame currently held in _frame, relative to stream `s',
 * or empty if FFmpeg could not produce a best-effort timestamp.
 */
302 optional<ContentTime>
303 FFmpegExaminer::frame_time (AVStream* s) const
305 optional<ContentTime> t;
307 int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
308 if (bet != AV_NOPTS_VALUE) {
/* Convert from the stream's time base to seconds */
309 t = ContentTime::from_seconds (bet * av_q2d (s->time_base));
/* @return Frame rate of the video stream, from FFmpeg's r_frame_rate.
 * NOTE(review): return type line is outside the visible chunk.
 */
316 FFmpegExaminer::video_frame_rate () const
318 DCPOMATIC_ASSERT (_video_stream);
319 /* This use of r_frame_rate is debateable; there's a few different
320 * frame rates in the format context, but this one seems to be the most
323 return av_q2d (av_stream_get_r_frame_rate (_format_context->streams[_video_stream.get()]));
/* @return Size of the video, in pixels, from the video codec context */
327 FFmpegExaminer::video_size () const
329 return dcp::Size (video_codec_context()->width, video_codec_context()->height);
332 /** @return Length according to our content's header */
334 FFmpegExaminer::video_length () const
/* Clamp to at least one frame so downstream code never sees a zero length */
336 return max (Frame (1), _video_length);
/* @return The video stream's sample aspect ratio as guessed by FFmpeg,
 * or empty if it is unknown.
 */
340 FFmpegExaminer::sample_aspect_ratio () const
342 DCPOMATIC_ASSERT (_video_stream);
343 AVRational sar = av_guess_sample_aspect_ratio (_format_context, _format_context->streams[_video_stream.get()], 0);
345 /* I assume this means that we don't know */
346 return optional<double> ();
348 return double (sar.num) / sar.den;
/* @return Human-readable name for subtitle stream `s', built from its
 * metadata via stream_name().
 * NOTE(review): the fallback for an empty name is outside the visible chunk.
 */
352 FFmpegExaminer::subtitle_stream_name (AVStream* s) const
354 locked_stringstream n;
356 n << stream_name (s);
358 if (n.str().empty()) {
/* @return Name for stream `s', assembled from its "language" and "title"
 * metadata entries.
 * NOTE(review): chunk is elided — the code that appends the entries to `n'
 * is not visible here.
 */
366 FFmpegExaminer::stream_name (AVStream* s) const
368 locked_stringstream n;
371 AVDictionaryEntry const * lang = av_dict_get (s->metadata, "language", 0, 0);
376 AVDictionaryEntry const * title = av_dict_get (s->metadata, "title", 0, 0);
/* Separate language from title if we already have some text */
378 if (!n.str().empty()) {
/* @return Bits per pixel of the video, from its pixel format descriptor.
 * @throws DecodeError if the pixel format is unknown.
 */
389 FFmpegExaminer::bits_per_pixel () const
391 if (video_codec_context()->pix_fmt == -1) {
392 throw DecodeError (_("Could not find pixel format for video."));
395 AVPixFmtDescriptor const * d = av_pix_fmt_desc_get (video_codec_context()->pix_fmt);
396 DCPOMATIC_ASSERT (d);
397 return av_get_bits_per_pixel (d);
/* @return true if the video's pixel format is one of FFmpeg's YUV-family
 * formats, by exhaustive enumeration of known YUV/NV pixel formats.
 * NOTE(review): the return statements for the two switch outcomes are
 * outside the visible chunk.
 */
401 FFmpegExaminer::yuv () const
403 switch (video_codec_context()->pix_fmt) {
404 case AV_PIX_FMT_YUV420P:
405 case AV_PIX_FMT_YUYV422:
406 case AV_PIX_FMT_YUV422P:
407 case AV_PIX_FMT_YUV444P:
408 case AV_PIX_FMT_YUV410P:
409 case AV_PIX_FMT_YUV411P:
410 case AV_PIX_FMT_YUVJ420P:
411 case AV_PIX_FMT_YUVJ422P:
412 case AV_PIX_FMT_YUVJ444P:
413 case AV_PIX_FMT_UYVY422:
414 case AV_PIX_FMT_UYYVYY411:
415 case AV_PIX_FMT_NV12:
416 case AV_PIX_FMT_NV21:
417 case AV_PIX_FMT_YUV440P:
418 case AV_PIX_FMT_YUVJ440P:
419 case AV_PIX_FMT_YUVA420P:
420 case AV_PIX_FMT_YUV420P16LE:
421 case AV_PIX_FMT_YUV420P16BE:
422 case AV_PIX_FMT_YUV422P16LE:
423 case AV_PIX_FMT_YUV422P16BE:
424 case AV_PIX_FMT_YUV444P16LE:
425 case AV_PIX_FMT_YUV444P16BE:
426 case AV_PIX_FMT_YUV420P9BE:
427 case AV_PIX_FMT_YUV420P9LE:
428 case AV_PIX_FMT_YUV420P10BE:
429 case AV_PIX_FMT_YUV420P10LE:
430 case AV_PIX_FMT_YUV422P10BE:
431 case AV_PIX_FMT_YUV422P10LE:
432 case AV_PIX_FMT_YUV444P9BE:
433 case AV_PIX_FMT_YUV444P9LE:
434 case AV_PIX_FMT_YUV444P10BE:
435 case AV_PIX_FMT_YUV444P10LE:
436 case AV_PIX_FMT_YUV422P9BE:
437 case AV_PIX_FMT_YUV422P9LE:
438 case AV_PIX_FMT_YUVA420P9BE:
439 case AV_PIX_FMT_YUVA420P9LE:
440 case AV_PIX_FMT_YUVA422P9BE:
441 case AV_PIX_FMT_YUVA422P9LE:
442 case AV_PIX_FMT_YUVA444P9BE:
443 case AV_PIX_FMT_YUVA444P9LE:
444 case AV_PIX_FMT_YUVA420P10BE:
445 case AV_PIX_FMT_YUVA420P10LE:
446 case AV_PIX_FMT_YUVA422P10BE:
447 case AV_PIX_FMT_YUVA422P10LE:
448 case AV_PIX_FMT_YUVA444P10BE:
449 case AV_PIX_FMT_YUVA444P10LE:
450 case AV_PIX_FMT_YUVA420P16BE:
451 case AV_PIX_FMT_YUVA420P16LE:
452 case AV_PIX_FMT_YUVA422P16BE:
453 case AV_PIX_FMT_YUVA422P16LE:
454 case AV_PIX_FMT_YUVA444P16BE:
455 case AV_PIX_FMT_YUVA444P16LE:
456 case AV_PIX_FMT_NV16:
457 case AV_PIX_FMT_NV20LE:
458 case AV_PIX_FMT_NV20BE:
459 case AV_PIX_FMT_YVYU422:
460 case AV_PIX_FMT_YUVA444P:
461 case AV_PIX_FMT_YUVA422P:
462 case AV_PIX_FMT_YUV420P12BE:
463 case AV_PIX_FMT_YUV420P12LE:
464 case AV_PIX_FMT_YUV420P14BE:
465 case AV_PIX_FMT_YUV420P14LE:
466 case AV_PIX_FMT_YUV422P12BE:
467 case AV_PIX_FMT_YUV422P12LE:
468 case AV_PIX_FMT_YUV422P14BE:
469 case AV_PIX_FMT_YUV422P14LE:
470 case AV_PIX_FMT_YUV444P12BE:
471 case AV_PIX_FMT_YUV444P12LE:
472 case AV_PIX_FMT_YUV444P14BE:
473 case AV_PIX_FMT_YUV444P14LE:
474 case AV_PIX_FMT_YUVJ411P:
/* @return true if a video stream was found (i.e. _video_stream is set) */
482 FFmpegExaminer::has_video () const
484 return static_cast<bool> (_video_stream);