2 Copyright (C) 2013-2015 Carl Hetherington <cth@carlh.net>
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 2 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 #include <libavcodec/avcodec.h>
22 #include <libavformat/avformat.h>
23 #include <libavutil/pixfmt.h>
24 #include <libavutil/pixdesc.h>
26 #include "ffmpeg_examiner.h"
27 #include "ffmpeg_content.h"
29 #include "ffmpeg_audio_stream.h"
30 #include "ffmpeg_subtitle_stream.h"
32 #include "safe_stringstream.h"
33 #include <boost/foreach.hpp>
41 using boost::shared_ptr;
42 using boost::optional;
44 /** @param job job that the examiner is operating in, or 0 */
45 FFmpegExaminer::FFmpegExaminer (shared_ptr<const FFmpegContent> c, shared_ptr<Job> job)
48 , _need_video_length (false)
50 /* Find audio and subtitle streams */
52 for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
53 AVStream* s = _format_context->streams[i];
54 if (s->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
56 /* This is a hack; sometimes it seems that _audio_codec_context->channel_layout isn't set up,
57 so bodge it here. No idea why we should have to do this.
60 if (s->codec->channel_layout == 0) {
61 s->codec->channel_layout = av_get_default_channel_layout (s->codec->channels);
/* Record this audio stream (name, id, sample rate, channel count) for later use */
64 _audio_streams.push_back (
65 shared_ptr<FFmpegAudioStream> (
66 new FFmpegAudioStream (audio_stream_name (s), s->id, s->codec->sample_rate, s->codec->channels)
70 } else if (s->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
71 _subtitle_streams.push_back (shared_ptr<FFmpegSubtitleStream> (new FFmpegSubtitleStream (subtitle_stream_name (s), s->id)));
75 /* See if the header has duration information in it */
76 _need_video_length = _format_context->duration == AV_NOPTS_VALUE;
77 if (!_need_video_length) {
/* Header gives a duration in AV_TIME_BASE units; convert to video frames */
78 _video_length = (double (_format_context->duration) / AV_TIME_BASE) * video_frame_rate().get ()
/* NOTE(review): `job` is documented above as possibly 0, but it is dereferenced
   below (sub() / set_progress()) with no visible null check — confirm callers
   always pass a Job, or guard with `if (job)`. */
82 if (_need_video_length) {
83 job->sub (_("Finding length and subtitles"));
85 job->sub (_("Finding subtitles"));
89 /* Run through until we find:
91 * - the first audio for each stream.
92 * - the subtitle periods for each stream.
94 * We have to note subtitle periods as otherwise we have no way of knowing
95 * where we should look for subtitles (video and audio are always present,
/* Total on-disk length of the content's file group; used to report demux progress */
99 int64_t const len = _file_group.length ();
/* Demux loop: pull packets and dispatch them to the per-type handlers below */
101 int r = av_read_frame (_format_context, &_packet);
108 job->set_progress (float (_format_context->pb->pos) / len);
110 job->set_progress_unknown ();
114 AVCodecContext* context = _format_context->streams[_packet.stream_index]->codec;
116 if (_packet.stream_index == _video_stream) {
117 video_packet (context);
120 bool got_all_audio = true;
122 for (size_t i = 0; i < _audio_streams.size(); ++i) {
123 if (_audio_streams[i]->uses_index (_format_context, _packet.stream_index)) {
124 audio_packet (context, _audio_streams[i]);
126 if (!_audio_streams[i]->first_audio) {
127 got_all_audio = false;
131 for (size_t i = 0; i < _subtitle_streams.size(); ++i) {
132 if (_subtitle_streams[i]->uses_index (_format_context, _packet.stream_index)) {
133 subtitle_packet (context, _subtitle_streams[i]);
/* Done with this packet; free its data before reading the next one */
137 av_packet_unref (&_packet);
/* Early exit: with no subtitle streams we can stop once video and all audio are seen */
139 if (_first_video && got_all_audio && _subtitle_streams.empty ()) {
145 /* Finish off any hanging subtitles at the end */
146 for (LastSubtitleMap::const_iterator i = _last_subtitle_start.begin(); i != _last_subtitle_start.end(); ++i) {
148 if (i->second->image) {
149 i->first->add_image_subtitle (
153 ContentTime::from_frames (video_length(), video_frame_rate().get_value_or (24))
157 i->first->add_text_subtitle (
161 ContentTime::from_frames (video_length(), video_frame_rate().get_value_or (24))
168 /* We just added subtitles to our streams without taking the PTS offset into account;
169 this is because we might not know the PTS offset when the first subtitle is seen.
170 Now we know the PTS offset so we can apply it to those subtitles.
172 if (video_frame_rate()) {
173 BOOST_FOREACH (shared_ptr<FFmpegSubtitleStream> i, _subtitle_streams) {
174 i->add_offset (pts_offset (_audio_streams, _first_video, video_frame_rate().get()));
/* Handle one demuxed video packet: decode it to note the time of the first
   video frame and, if the header had no duration, extend _video_length as
   frames are seen. */
180 FFmpegExaminer::video_packet (AVCodecContext* context)
/* Nothing more to learn once we have the first frame time and a header length */
182 if (_first_video && !_need_video_length) {
187 if (avcodec_decode_video2 (context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
189 _first_video = frame_time (_format_context->streams[_video_stream]);
191 if (_need_video_length) {
/* No header duration, so track length from the latest decoded frame's time */
192 _video_length = frame_time (
193 _format_context->streams[_video_stream]
194 ).get_value_or (ContentTime ()).frames_round (video_frame_rate().get ());
/* Handle one demuxed audio packet for `stream`: decode only until the time of
   the stream's first audio is known. */
200 FFmpegExaminer::audio_packet (AVCodecContext* context, shared_ptr<FFmpegAudioStream> stream)
/* Already have this stream's first-audio time; nothing to do */
202 if (stream->first_audio) {
207 if (avcodec_decode_audio4 (context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) {
208 stream->first_audio = frame_time (stream->stream (_format_context));
/* Handle one demuxed subtitle packet for `stream`.  Records subtitle periods
   (some subtitles only learn their end time from the following packet, tracked
   via _last_subtitle_start) and collects bitmap-subtitle palette colours. */
213 FFmpegExaminer::subtitle_packet (AVCodecContext* context, shared_ptr<FFmpegSubtitleStream> stream)
217 if (avcodec_decode_subtitle2 (context, &sub, &frame_finished, &_packet) >= 0 && frame_finished) {
218 string id = subtitle_id (sub);
219 FFmpegSubtitlePeriod const period = subtitle_period (sub);
220 bool const starts_image = subtitle_starts_image (sub);
222 LastSubtitleMap::iterator last = _last_subtitle_start.find (stream);
223 if (last != _last_subtitle_start.end() && last->second) {
224 /* We have seen the start of a subtitle but not yet the end. Whatever this is
225 finishes the previous subtitle, so add it */
226 if (last->second->image) {
227 stream->add_image_subtitle (last->second->id, ContentTimePeriod (last->second->time, period.from));
229 stream->add_text_subtitle (last->second->id, ContentTimePeriod (last->second->time, period.from));
231 if (sub.num_rects == 0) {
232 /* This is a `proper' end-of-subtitle */
233 _last_subtitle_start[stream] = optional<SubtitleStart> ();
235 /* This is just another subtitle, so we start again */
236 _last_subtitle_start[stream] = SubtitleStart (id, starts_image, period.from);
238 } else if (sub.num_rects == 1) {
/* Single-rect subtitle: if its period has an explicit end we can add it
   complete now; otherwise remember the start and wait for the next packet */
241 stream->add_image_subtitle (id, ContentTimePeriod (period.from, period.to.get ()));
243 stream->add_text_subtitle (id, ContentTimePeriod (period.from, period.to.get ()));
246 _last_subtitle_start[stream] = SubtitleStart (id, starts_image, period.from);
/* Collect palette colours from any bitmap rects; the pict/data split below
   handles old vs. new AVSubtitleRect layouts (DCPOMATIC_HAVE_AVSUBTITLERECT_PICT) */
250 for (unsigned int i = 0; i < sub.num_rects; ++i) {
251 if (sub.rects[i]->type == SUBTITLE_BITMAP) {
252 #ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
253 uint32_t* palette = (uint32_t *) sub.rects[i]->pict.data[1];
254 for (int j = 0; j < sub.rects[i]->nb_colors; ++j) {
/* Palette entries look like packed ARGB words (A in the top byte) — unpack
   channels by mask/shift.  NOTE(review): byte order assumed; confirm against
   the FFmpeg PAL8 palette convention. */
256 (palette[j] & 0x00ff0000) >> 16,
257 (palette[j] & 0x0000ff00) >> 8,
258 (palette[j] & 0x000000ff) >> 0,
259 (palette[j] & 0xff000000) >> 24
262 stream->set_colour (rgba, rgba);
265 uint32_t* palette = (uint32_t *) sub.rects[i]->data[1];
266 for (int j = 0; j < sub.rects[i]->nb_colors; ++j) {
268 (palette[j] & 0x00ff0000) >> 16,
269 (palette[j] & 0x0000ff00) >> 8,
270 (palette[j] & 0x000000ff) >> 0,
271 (palette[j] & 0xff000000) >> 24
274 stream->set_colour (rgba, rgba);
/* Free the decoded AVSubtitle's rects/data */
280 avsubtitle_free (&sub);
/* @return Time of the frame currently held in _frame, derived from its
   best-effort timestamp scaled by the stream's time base; unset if the
   timestamp is AV_NOPTS_VALUE. */
284 optional<ContentTime>
285 FFmpegExaminer::frame_time (AVStream* s) const
287 optional<ContentTime> t;
289 int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
290 if (bet != AV_NOPTS_VALUE) {
291 t = ContentTime::from_seconds (bet * av_q2d (s->time_base));
/* @return Frame rate of the video stream, taken from r_frame_rate. */
298 FFmpegExaminer::video_frame_rate () const
300 /* This use of r_frame_rate is debateable; there's a few different
301 * frame rates in the format context, but this one seems to be the most
304 return av_q2d (av_stream_get_r_frame_rate (_format_context->streams[_video_stream]));
/* @return Video frame size (width x height) from the video codec context. */
308 FFmpegExaminer::video_size () const
310 return dcp::Size (video_codec_context()->width, video_codec_context()->height);
313 /** @return Length according to our content's header, clamped to at least one frame */
315 FFmpegExaminer::video_length () const
317 return max (Frame (1), _video_length);
/* @return Sample aspect ratio of the video stream as guessed by FFmpeg,
   or unset if it cannot be determined. */
321 FFmpegExaminer::sample_aspect_ratio () const
323 AVRational sar = av_guess_sample_aspect_ratio (_format_context, _format_context->streams[_video_stream], 0);
325 /* I assume this means that we don't know */
326 return optional<double> ();
328 return double (sar.num) / sar.den;
/* @return Human-readable name for audio stream `s`: its metadata-derived
   stream name (if any) followed by the channel count. */
332 FFmpegExaminer::audio_stream_name (AVStream* s) const
336 n << stream_name (s);
338 if (!n.str().empty()) {
342 n << s->codec->channels << " channels";
/* @return Human-readable name for subtitle stream `s`, with a fallback
   (visible fallback text is outside this chunk) when metadata gives nothing. */
348 FFmpegExaminer::subtitle_stream_name (AVStream* s) const
352 n << stream_name (s);
354 if (n.str().empty()) {
/* @return Name for stream `s` built from its "language" and "title"
   metadata entries, when present. */
362 FFmpegExaminer::stream_name (AVStream* s) const
367 AVDictionaryEntry const * lang = av_dict_get (s->metadata, "language", 0, 0);
372 AVDictionaryEntry const * title = av_dict_get (s->metadata, "title", 0, 0);
374 if (!n.str().empty()) {
/* @return Bits per pixel of the video's pixel format.
   @throws DecodeError if the pixel format is unknown (-1 / AV_PIX_FMT_NONE). */
385 FFmpegExaminer::bits_per_pixel () const
387 if (video_codec_context()->pix_fmt == -1) {
388 throw DecodeError (_("Could not find pixel format for video."));
391 AVPixFmtDescriptor const * d = av_pix_fmt_desc_get (video_codec_context()->pix_fmt);
392 DCPOMATIC_ASSERT (d);
393 return av_get_bits_per_pixel (d);
/* @return true if the video's pixel format is a YUV format (the switch
   enumerates every AV_PIX_FMT_YUV* / NV* variant; the remainder of the
   switch, including its default, continues beyond this chunk). */
397 FFmpegExaminer::yuv () const
399 switch (video_codec_context()->pix_fmt) {
400 case AV_PIX_FMT_YUV420P:
401 case AV_PIX_FMT_YUYV422:
402 case AV_PIX_FMT_YUV422P:
403 case AV_PIX_FMT_YUV444P:
404 case AV_PIX_FMT_YUV410P:
405 case AV_PIX_FMT_YUV411P:
406 case AV_PIX_FMT_YUVJ420P:
407 case AV_PIX_FMT_YUVJ422P:
408 case AV_PIX_FMT_YUVJ444P:
409 case AV_PIX_FMT_UYVY422:
410 case AV_PIX_FMT_UYYVYY411:
411 case AV_PIX_FMT_NV12:
412 case AV_PIX_FMT_NV21:
413 case AV_PIX_FMT_YUV440P:
414 case AV_PIX_FMT_YUVJ440P:
415 case AV_PIX_FMT_YUVA420P:
416 case AV_PIX_FMT_YUV420P16LE:
417 case AV_PIX_FMT_YUV420P16BE:
418 case AV_PIX_FMT_YUV422P16LE:
419 case AV_PIX_FMT_YUV422P16BE:
420 case AV_PIX_FMT_YUV444P16LE:
421 case AV_PIX_FMT_YUV444P16BE:
422 case AV_PIX_FMT_YUV420P9BE:
423 case AV_PIX_FMT_YUV420P9LE:
424 case AV_PIX_FMT_YUV420P10BE:
425 case AV_PIX_FMT_YUV420P10LE:
426 case AV_PIX_FMT_YUV422P10BE:
427 case AV_PIX_FMT_YUV422P10LE:
428 case AV_PIX_FMT_YUV444P9BE:
429 case AV_PIX_FMT_YUV444P9LE:
430 case AV_PIX_FMT_YUV444P10BE:
431 case AV_PIX_FMT_YUV444P10LE:
432 case AV_PIX_FMT_YUV422P9BE:
433 case AV_PIX_FMT_YUV422P9LE:
434 case AV_PIX_FMT_YUVA420P9BE:
435 case AV_PIX_FMT_YUVA420P9LE:
436 case AV_PIX_FMT_YUVA422P9BE:
437 case AV_PIX_FMT_YUVA422P9LE:
438 case AV_PIX_FMT_YUVA444P9BE:
439 case AV_PIX_FMT_YUVA444P9LE:
440 case AV_PIX_FMT_YUVA420P10BE:
441 case AV_PIX_FMT_YUVA420P10LE:
442 case AV_PIX_FMT_YUVA422P10BE:
443 case AV_PIX_FMT_YUVA422P10LE:
444 case AV_PIX_FMT_YUVA444P10BE:
445 case AV_PIX_FMT_YUVA444P10LE:
446 case AV_PIX_FMT_YUVA420P16BE:
447 case AV_PIX_FMT_YUVA420P16LE:
448 case AV_PIX_FMT_YUVA422P16BE:
449 case AV_PIX_FMT_YUVA422P16LE:
450 case AV_PIX_FMT_YUVA444P16BE:
451 case AV_PIX_FMT_YUVA444P16LE:
452 case AV_PIX_FMT_NV16:
453 case AV_PIX_FMT_NV20LE:
454 case AV_PIX_FMT_NV20BE:
455 case AV_PIX_FMT_YVYU422:
456 case AV_PIX_FMT_YUVA444P:
457 case AV_PIX_FMT_YUVA422P:
458 case AV_PIX_FMT_YUV420P12BE:
459 case AV_PIX_FMT_YUV420P12LE:
460 case AV_PIX_FMT_YUV420P14BE:
461 case AV_PIX_FMT_YUV420P14LE:
462 case AV_PIX_FMT_YUV422P12BE:
463 case AV_PIX_FMT_YUV422P12LE:
464 case AV_PIX_FMT_YUV422P14BE:
465 case AV_PIX_FMT_YUV422P14LE:
466 case AV_PIX_FMT_YUV444P12BE:
467 case AV_PIX_FMT_YUV444P12LE:
468 case AV_PIX_FMT_YUV444P14BE:
469 case AV_PIX_FMT_YUV444P14LE:
470 case AV_PIX_FMT_YUVJ411P: