2 Copyright (C) 2013-2015 Carl Hetherington <cth@carlh.net>
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 2 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 #include <libavcodec/avcodec.h>
22 #include <libavformat/avformat.h>
23 #include <libavutil/pixfmt.h>
24 #include <libavutil/pixdesc.h>
26 #include "ffmpeg_examiner.h"
27 #include "ffmpeg_content.h"
29 #include "ffmpeg_audio_stream.h"
30 #include "ffmpeg_subtitle_stream.h"
32 #include "safe_stringstream.h"
33 #include <boost/foreach.hpp>
41 using boost::shared_ptr;
42 using boost::optional;
44 /** @param job job that the examiner is operating in, or 0 */
/* NOTE(review): this chunk is an extraction with lines elided (the embedded
   original line numbers jump), so several statements and closing braces of
   this constructor are not visible here.  The comments below describe only
   what the visible code shows. */
45 FFmpegExaminer::FFmpegExaminer (shared_ptr<const FFmpegContent> c, shared_ptr<Job> job)
48 , _need_video_length (false)
50 /* Find audio and subtitle streams */
52 for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
53 AVStream* s = _format_context->streams[i];
54 if (s->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
56 /* This is a hack; sometimes it seems that _audio_codec_context->channel_layout isn't set up,
57 so bodge it here. No idea why we should have to do this.
60 if (s->codec->channel_layout == 0) {
61 s->codec->channel_layout = av_get_default_channel_layout (s->codec->channels);
/* The audio stream's frame count below is derived from the header duration,
   so a valid duration is required here */
64 DCPOMATIC_ASSERT (_format_context->duration != AV_NOPTS_VALUE);
66 _audio_streams.push_back (
67 shared_ptr<FFmpegAudioStream> (
68 new FFmpegAudioStream (
69 audio_stream_name (s),
71 s->codec->sample_rate,
/* duration is in AV_TIME_BASE units; convert to seconds then to samples */
72 (double (_format_context->duration) / AV_TIME_BASE) * s->codec->sample_rate,
78 } else if (s->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
79 _subtitle_streams.push_back (shared_ptr<FFmpegSubtitleStream> (new FFmpegSubtitleStream (subtitle_stream_name (s), s->id)));
84 /* See if the header has duration information in it */
85 _need_video_length = _format_context->duration == AV_NOPTS_VALUE;
86 if (!_need_video_length) {
/* Header gave us a duration, so compute the video length directly from it */
87 _video_length = (double (_format_context->duration) / AV_TIME_BASE) * video_frame_rate().get ();
/* Label the job's sub-task according to whether we still need to find the length
   by decoding (presumably guarded by a job check on an elided line — job may be 0) */
92 if (_need_video_length) {
93 job->sub (_("Finding length and subtitles"));
95 job->sub (_("Finding subtitles"));
99 /* Run through until we find:
101 * - the first audio for each stream.
102 * - the subtitle periods for each stream.
104 * We have to note subtitle periods as otherwise we have no way of knowing
105 * where we should look for subtitles (video and audio are always present,
/* Total length of the content on disk; used for progress reporting below */
109 int64_t const len = _file_group.length ();
/* Demux packets one at a time (loop construct itself is on an elided line) */
111 int r = av_read_frame (_format_context, &_packet);
118 job->set_progress (float (_format_context->pb->pos) / len);
120 job->set_progress_unknown ();
/* Dispatch the packet to the appropriate per-stream-type handler */
124 AVCodecContext* context = _format_context->streams[_packet.stream_index]->codec;
126 if (_video_stream && _packet.stream_index == _video_stream.get()) {
127 video_packet (context);
130 bool got_all_audio = true;
132 for (size_t i = 0; i < _audio_streams.size(); ++i) {
133 if (_audio_streams[i]->uses_index (_format_context, _packet.stream_index)) {
134 audio_packet (context, _audio_streams[i]);
136 if (!_audio_streams[i]->first_audio) {
137 got_all_audio = false;
141 for (size_t i = 0; i < _subtitle_streams.size(); ++i) {
142 if (_subtitle_streams[i]->uses_index (_format_context, _packet.stream_index)) {
143 subtitle_packet (context, _subtitle_streams[i]);
/* Release the packet's buffers before reading the next one */
147 av_packet_unref (&_packet);
/* Early exit: with no subtitle streams there is nothing left to learn once we
   have first video and first audio for every stream */
149 if (_first_video && got_all_audio && _subtitle_streams.empty ()) {
155 /* Finish off any hanging subtitles at the end */
156 for (LastSubtitleMap::const_iterator i = _last_subtitle_start.begin(); i != _last_subtitle_start.end(); ++i) {
158 if (i->second->image) {
159 i->first->add_image_subtitle (
/* Close the open subtitle at the end of the video, defaulting to 24fps
   if the frame rate is unknown */
163 ContentTime::from_frames (video_length(), video_frame_rate().get_value_or (24))
167 i->first->add_text_subtitle (
171 ContentTime::from_frames (video_length(), video_frame_rate().get_value_or (24))
178 /* We just added subtitles to our streams without taking the PTS offset into account;
179 this is because we might not know the PTS offset when the first subtitle is seen.
180 Now we know the PTS offset so we can apply it to those subtitles.
182 if (has_video() && video_frame_rate()) {
183 BOOST_FOREACH (shared_ptr<FFmpegSubtitleStream> i, _subtitle_streams) {
184 i->add_offset (pts_offset (_audio_streams, _first_video, video_frame_rate().get()));
/* Decode one video packet, recording the time of the first video frame and
   (when the header had no duration) extending the measured video length.
   NOTE(review): some lines of this function are elided in this chunk
   (embedded line numbers jump), e.g. the early-return body and the
   declaration of frame_finished. */
190 FFmpegExaminer::video_packet (AVCodecContext* context)
192 DCPOMATIC_ASSERT (_video_stream);
/* If we already have the first video time and don't need to measure length,
   there is nothing to do (early return presumably on an elided line) */
194 if (_first_video && !_need_video_length) {
199 if (avcodec_decode_video2 (context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
201 _first_video = frame_time (_format_context->streams[_video_stream.get()]);
203 if (_need_video_length) {
/* Track length as the frame count of the latest decoded frame's time */
204 _video_length = frame_time (
205 _format_context->streams[_video_stream.get()]
206 ).get_value_or (ContentTime ()).frames_round (video_frame_rate().get ());
/* Decode one audio packet for the given stream, recording the time of the
   stream's first audio frame.  NOTE(review): the early-return body and the
   frame_finished declaration are on lines elided from this chunk. */
212 FFmpegExaminer::audio_packet (AVCodecContext* context, shared_ptr<FFmpegAudioStream> stream)
/* Already have the first audio time for this stream; nothing to do */
214 if (stream->first_audio) {
219 if (avcodec_decode_audio4 (context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
220 stream->first_audio = frame_time (stream->stream (_format_context));
/* Decode one subtitle packet for the given stream, tracking subtitle periods.
   A subtitle without a known end time is held open in _last_subtitle_start
   and closed by whatever subtitle packet arrives next.
   NOTE(review): several lines of this function are elided in this chunk
   (declarations of sub/frame_finished/rgba and some braces are missing). */
225 FFmpegExaminer::subtitle_packet (AVCodecContext* context, shared_ptr<FFmpegSubtitleStream> stream)
229 if (avcodec_decode_subtitle2 (context, &sub, &frame_finished, &_packet) >= 0 && frame_finished) {
230 string id = subtitle_id (sub);
231 FFmpegSubtitlePeriod const period = subtitle_period (sub);
232 bool const starts_image = subtitle_starts_image (sub);
234 LastSubtitleMap::iterator last = _last_subtitle_start.find (stream);
235 if (last != _last_subtitle_start.end() && last->second) {
236 /* We have seen the start of a subtitle but not yet the end. Whatever this is
237 finishes the previous subtitle, so add it */
238 if (last->second->image) {
239 stream->add_image_subtitle (last->second->id, ContentTimePeriod (last->second->time, period.from));
241 stream->add_text_subtitle (last->second->id, ContentTimePeriod (last->second->time, period.from));
243 if (sub.num_rects == 0) {
244 /* This is a `proper' end-of-subtitle */
245 _last_subtitle_start[stream] = optional<SubtitleStart> ();
247 /* This is just another subtitle, so we start again */
248 _last_subtitle_start[stream] = SubtitleStart (id, starts_image, period.from);
/* A single-rect subtitle: if its end time is known, record the complete
   period now; otherwise (presumably, on elided lines) remember its start */
250 } else if (sub.num_rects == 1) {
253 stream->add_image_subtitle (id, ContentTimePeriod (period.from, period.to.get ()));
255 stream->add_text_subtitle (id, ContentTimePeriod (period.from, period.to.get ()));
258 _last_subtitle_start[stream] = SubtitleStart (id, starts_image, period.from);
/* Note the colours used by bitmap subtitles so the caller can map them later */
262 for (unsigned int i = 0; i < sub.num_rects; ++i) {
263 if (sub.rects[i]->type == SUBTITLE_BITMAP) {
/* Older FFmpeg keeps the palette in rect->pict; newer in rect->data */
264 #ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
265 uint32_t* palette = (uint32_t *) sub.rects[i]->pict.data[1];
266 for (int j = 0; j < sub.rects[i]->nb_colors; ++j) {
/* Palette entries are packed ARGB: split out R, G, B, A */
268 (palette[j] & 0x00ff0000) >> 16,
269 (palette[j] & 0x0000ff00) >> 8,
270 (palette[j] & 0x000000ff) >> 0,
271 (palette[j] & 0xff000000) >> 24
274 stream->set_colour (rgba, rgba);
277 uint32_t* palette = (uint32_t *) sub.rects[i]->data[1];
278 for (int j = 0; j < sub.rects[i]->nb_colors; ++j) {
280 (palette[j] & 0x00ff0000) >> 16,
281 (palette[j] & 0x0000ff00) >> 8,
282 (palette[j] & 0x000000ff) >> 0,
283 (palette[j] & 0xff000000) >> 24
286 stream->set_colour (rgba, rgba);
/* Free the decoded subtitle's resources */
292 avsubtitle_free (&sub);
/** @param s stream that _frame was decoded from.
 *  @return time of the frame currently held in _frame, or unset if FFmpeg
 *  could not work out a best-effort timestamp for it.
 */
296 optional<ContentTime>
297 FFmpegExaminer::frame_time (AVStream* s) const
299 optional<ContentTime> t;
301 int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
302 if (bet != AV_NOPTS_VALUE) {
/* bet is in the stream's time_base units; convert to seconds */
303 t = ContentTime::from_seconds (bet * av_q2d (s->time_base));
/* Frame rate of the video stream, taken from FFmpeg's r_frame_rate
   (requires a video stream to be present). */
310 FFmpegExaminer::video_frame_rate () const
312 DCPOMATIC_ASSERT (_video_stream);
313 /* This use of r_frame_rate is debateable; there's a few different
314 * frame rates in the format context, but this one seems to be the most
317 return av_q2d (av_stream_get_r_frame_rate (_format_context->streams[_video_stream.get()]));
/* Size (width x height) of the video, from the video codec context */
321 FFmpegExaminer::video_size () const
323 return dcp::Size (video_codec_context()->width, video_codec_context()->height);
326 /** @return Length according to our content's header */
328 FFmpegExaminer::video_length () const
/* Clamp to at least one frame so callers never see a zero/negative length */
330 return max (Frame (1), _video_length);
/* Sample (pixel) aspect ratio of the video stream as guessed by FFmpeg,
   or unset if FFmpeg reports it as unknown.  Requires a video stream. */
334 FFmpegExaminer::sample_aspect_ratio () const
336 DCPOMATIC_ASSERT (_video_stream);
337 AVRational sar = av_guess_sample_aspect_ratio (_format_context, _format_context->streams[_video_stream.get()], 0);
/* (the guard condition for this branch is on a line elided from this chunk;
   presumably sar was 0/1) */
339 /* I assume this means that we don't know */
340 return optional<double> ();
342 return double (sar.num) / sar.den;
/* Human-readable name for an audio stream: the generic stream name (if any)
   plus its channel count.  NOTE(review): stream declaration and separator
   lines are elided from this chunk. */
346 FFmpegExaminer::audio_stream_name (AVStream* s) const
350 n << stream_name (s);
352 if (!n.str().empty()) {
356 n << s->codec->channels << " channels";
/* Human-readable name for a subtitle stream; falls back to a default when
   the stream metadata gives nothing (fallback text is on an elided line). */
362 FFmpegExaminer::subtitle_stream_name (AVStream* s) const
366 n << stream_name (s);
368 if (n.str().empty()) {
/* Build a name for a stream from its metadata: the "language" entry, then
   the "title" entry (separator/append logic is on lines elided from this
   chunk). */
376 FFmpegExaminer::stream_name (AVStream* s) const
381 AVDictionaryEntry const * lang = av_dict_get (s->metadata, "language", 0, 0);
386 AVDictionaryEntry const * title = av_dict_get (s->metadata, "title", 0, 0);
388 if (!n.str().empty()) {
/* Bits per pixel of the video's pixel format.
   @throws DecodeError if the pixel format is unknown. */
399 FFmpegExaminer::bits_per_pixel () const
/* -1 here corresponds to AV_PIX_FMT_NONE */
401 if (video_codec_context()->pix_fmt == -1) {
402 throw DecodeError (_("Could not find pixel format for video."));
405 AVPixFmtDescriptor const * d = av_pix_fmt_desc_get (video_codec_context()->pix_fmt);
406 DCPOMATIC_ASSERT (d);
407 return av_get_bits_per_pixel (d);
/* Whether the video's pixel format is one of the YUV family of formats,
   decided by an explicit whitelist of pixel formats.
   NOTE(review): the return statements for the listed cases and the default
   branch are on lines elided from this chunk. */
411 FFmpegExaminer::yuv () const
413 switch (video_codec_context()->pix_fmt) {
414 case AV_PIX_FMT_YUV420P:
415 case AV_PIX_FMT_YUYV422:
416 case AV_PIX_FMT_YUV422P:
417 case AV_PIX_FMT_YUV444P:
418 case AV_PIX_FMT_YUV410P:
419 case AV_PIX_FMT_YUV411P:
420 case AV_PIX_FMT_YUVJ420P:
421 case AV_PIX_FMT_YUVJ422P:
422 case AV_PIX_FMT_YUVJ444P:
423 case AV_PIX_FMT_UYVY422:
424 case AV_PIX_FMT_UYYVYY411:
425 case AV_PIX_FMT_NV12:
426 case AV_PIX_FMT_NV21:
427 case AV_PIX_FMT_YUV440P:
428 case AV_PIX_FMT_YUVJ440P:
429 case AV_PIX_FMT_YUVA420P:
430 case AV_PIX_FMT_YUV420P16LE:
431 case AV_PIX_FMT_YUV420P16BE:
432 case AV_PIX_FMT_YUV422P16LE:
433 case AV_PIX_FMT_YUV422P16BE:
434 case AV_PIX_FMT_YUV444P16LE:
435 case AV_PIX_FMT_YUV444P16BE:
436 case AV_PIX_FMT_YUV420P9BE:
437 case AV_PIX_FMT_YUV420P9LE:
438 case AV_PIX_FMT_YUV420P10BE:
439 case AV_PIX_FMT_YUV420P10LE:
440 case AV_PIX_FMT_YUV422P10BE:
441 case AV_PIX_FMT_YUV422P10LE:
442 case AV_PIX_FMT_YUV444P9BE:
443 case AV_PIX_FMT_YUV444P9LE:
444 case AV_PIX_FMT_YUV444P10BE:
445 case AV_PIX_FMT_YUV444P10LE:
446 case AV_PIX_FMT_YUV422P9BE:
447 case AV_PIX_FMT_YUV422P9LE:
448 case AV_PIX_FMT_YUVA420P9BE:
449 case AV_PIX_FMT_YUVA420P9LE:
450 case AV_PIX_FMT_YUVA422P9BE:
451 case AV_PIX_FMT_YUVA422P9LE:
452 case AV_PIX_FMT_YUVA444P9BE:
453 case AV_PIX_FMT_YUVA444P9LE:
454 case AV_PIX_FMT_YUVA420P10BE:
455 case AV_PIX_FMT_YUVA420P10LE:
456 case AV_PIX_FMT_YUVA422P10BE:
457 case AV_PIX_FMT_YUVA422P10LE:
458 case AV_PIX_FMT_YUVA444P10BE:
459 case AV_PIX_FMT_YUVA444P10LE:
460 case AV_PIX_FMT_YUVA420P16BE:
461 case AV_PIX_FMT_YUVA420P16LE:
462 case AV_PIX_FMT_YUVA422P16BE:
463 case AV_PIX_FMT_YUVA422P16LE:
464 case AV_PIX_FMT_YUVA444P16BE:
465 case AV_PIX_FMT_YUVA444P16LE:
466 case AV_PIX_FMT_NV16:
467 case AV_PIX_FMT_NV20LE:
468 case AV_PIX_FMT_NV20BE:
469 case AV_PIX_FMT_YVYU422:
470 case AV_PIX_FMT_YUVA444P:
471 case AV_PIX_FMT_YUVA422P:
472 case AV_PIX_FMT_YUV420P12BE:
473 case AV_PIX_FMT_YUV420P12LE:
474 case AV_PIX_FMT_YUV420P14BE:
475 case AV_PIX_FMT_YUV420P14LE:
476 case AV_PIX_FMT_YUV422P12BE:
477 case AV_PIX_FMT_YUV422P12LE:
478 case AV_PIX_FMT_YUV422P14BE:
479 case AV_PIX_FMT_YUV422P14LE:
480 case AV_PIX_FMT_YUV444P12BE:
481 case AV_PIX_FMT_YUV444P12LE:
482 case AV_PIX_FMT_YUV444P14BE:
483 case AV_PIX_FMT_YUV444P14LE:
484 case AV_PIX_FMT_YUVJ411P:
/* Whether this content has a video stream (i.e. _video_stream is set) */
492 FFmpegExaminer::has_video () const
494 return static_cast<bool> (_video_stream);