Ignore FFmpeg warnings in a nicer way.
[dcpomatic.git] / src/lib/ffmpeg_decoder.cc
1 /*
2     Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 /** @file  src/lib/ffmpeg_decoder.cc
22  *  @brief A decoder using FFmpeg to decode content.
23  */
24
25 #include "filter.h"
26 #include "exceptions.h"
27 #include "image.h"
28 #include "util.h"
29 #include "log.h"
30 #include "dcpomatic_log.h"
31 #include "ffmpeg_decoder.h"
32 #include "text_decoder.h"
33 #include "ffmpeg_audio_stream.h"
34 #include "ffmpeg_subtitle_stream.h"
35 #include "video_filter_graph.h"
36 #include "audio_buffers.h"
37 #include "ffmpeg_content.h"
38 #include "raw_image_proxy.h"
39 #include "video_decoder.h"
40 #include "film.h"
41 #include "audio_decoder.h"
42 #include "compose.hpp"
43 #include "text_content.h"
44 #include "audio_content.h"
45 #include "frame_interval_checker.h"
46 #include <dcp/subtitle_string.h>
47 #include <sub/ssa_reader.h>
48 #include <sub/subtitle.h>
49 #include <sub/collect.h>
50 extern "C" {
51 #include <libavcodec/avcodec.h>
52 #include <libavformat/avformat.h>
53 }
54 #include <boost/foreach.hpp>
55 #include <boost/algorithm/string.hpp>
56 #include <vector>
57 #include <iomanip>
58 #include <iostream>
59 #include <stdint.h>
60
61 #include "i18n.h"
62
63 using std::cout;
64 using std::string;
65 using std::vector;
66 using std::list;
67 using std::min;
68 using std::pair;
69 using std::max;
70 using std::map;
71 using boost::shared_ptr;
72 using boost::is_any_of;
73 using boost::split;
74 using boost::optional;
75 using boost::dynamic_pointer_cast;
76 using dcp::Size;
77 using namespace dcpomatic;
78
79 FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
80         : FFmpeg (c)
81         , Decoder (film)
82         , _have_current_subtitle (false)
83 {
84         if (c->video && c->video->use()) {
85                 video.reset (new VideoDecoder (this, c));
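                /* _pts_offset is added to timestamps taken from the file to convert them to ContentTime */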
86                 _pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
87                 /* It doesn't matter what size or pixel format this is, it just needs to be black */
88                 _black_image.reset (new Image (AV_PIX_FMT_RGB24, dcp::Size (128, 128), true));
89                 _black_image->make_black ();
90         } else {
91                 _pts_offset = ContentTime ();
92         }
93
94         if (c->audio) {
95                 audio.reset (new AudioDecoder (this, c->audio, fast));
96         }
97
98         if (c->only_text()) {
99                 /* XXX: this time here should be the time of the first subtitle, not 0 */
100                 text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, c->only_text(), ContentTime())));
101         }
102
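        /* One slot per stream in the file: our estimate of when the next audio frame on that stream
           should start, used when a frame arrives without a timestamp (see decode_audio_packet()).
        */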
103         _next_time.resize (_format_context->nb_streams);
104 }
105
106 void
107 FFmpegDecoder::flush ()
108 {
109         /* Get any remaining frames */
110
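        /* With the decode_* API used here, passing a packet with null data asks the codec to
           return any frames it still has buffered.
        */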
111         _packet.data = 0;
112         _packet.size = 0;
113
114         /* XXX: should we reset _packet.data and size after each *_decode_* call? */
115
116         while (video && decode_video_packet()) {}
117
118         if (audio) {
119                 decode_audio_packet ();
120         }
121
122         /* Make sure all streams are the same length and round up to the next video frame */
123
124         FrameRateChange const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
125         ContentTime full_length (_ffmpeg_content->full_length(film()), frc);
126         full_length = full_length.ceil (frc.source);
127         if (video) {
128                 double const vfr = _ffmpeg_content->video_frame_rate().get();
129                 Frame const f = full_length.frames_round (vfr);
130                 Frame v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
131                 while (v < f) {
132                         video->emit (film(), shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)), v);
133                         ++v;
134                 }
135         }
136
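        /* Pad each audio stream with silence up to the rounded-up full length */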
137         BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, _ffmpeg_content->ffmpeg_audio_streams ()) {
138                 ContentTime a = audio->stream_position(film(), i);
139                 /* Unfortunately if a is 0 that really means that we don't know the stream position since
140                    there has been no data on it since the last seek.  In this case we'll just do nothing
141                    here.  I'm not sure if that's the right idea.
142                 */
143                 if (a > ContentTime()) {
144                         while (a < full_length) {
145                                 ContentTime to_do = min (full_length - a, ContentTime::from_seconds (0.1));
146                                 shared_ptr<AudioBuffers> silence (new AudioBuffers (i->channels(), to_do.frames_ceil (i->frame_rate())));
147                                 silence->make_silent ();
148                                 audio->emit (film(), i, silence, a);
149                                 a += to_do;
150                         }
151                 }
152         }
153
154         if (audio) {
155                 audio->flush ();
156         }
157 }
158
159 bool
160 FFmpegDecoder::pass ()
161 {
162 #ifdef DCPOMATIC_VARIANT_SWAROOP
163         if (_ffmpeg_content->encrypted() && !_ffmpeg_content->kdm()) {
164                 return true;
165         }
166 #endif
167
168         int r = av_read_frame (_format_context, &_packet);
169
170         /* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
171            has pretty much succeeded (and hence generated data which should be processed).
172            Hence it makes sense to continue here in that case.
173         */
174         if (r < 0 && r != AVERROR_INVALIDDATA) {
175                 if (r != AVERROR_EOF) {
176                         /* Maybe we should fail here, but for now we'll just finish off instead */
177                         char buf[256];
178                         av_strerror (r, buf, sizeof(buf));
179                         LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), &buf[0], r);
180                 }
181
182                 flush ();
183                 return true;
184         }
185
186         int const si = _packet.stream_index;
187         shared_ptr<const FFmpegContent> fc = _ffmpeg_content;
188
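        /* Pass the packet to the video, subtitle or audio decoder according to its stream */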
189         if (_video_stream && si == _video_stream.get() && video && !video->ignore()) {
190                 decode_video_packet ();
191         } else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
192                 decode_subtitle_packet ();
193         } else {
194                 decode_audio_packet ();
195         }
196
197         av_packet_unref (&_packet);
198         return false;
199 }
200
201 /** Deinterleave audio from _frame for the given stream and convert it to float.
202  *  Only the first of _frame's data buffers is used for non-planar formats; for planar formats there is one buffer per channel.
203  */
204 shared_ptr<AudioBuffers>
205 FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
206 {
207         DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));
208
209 DCPOMATIC_DISABLE_WARNINGS
210         int const size = av_samples_get_buffer_size (
211                 0, stream->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (stream), 1
212                 );
213 DCPOMATIC_ENABLE_WARNINGS
214
215         /* XXX: can't we just use _frame->nb_samples directly here? */
216         /* XXX: can't we use swr_convert() to do the format conversion? */
217
218         /* Deinterleave and convert to float */
219
220         /* total_samples and frames will be rounded down here, so if there are stray samples at the end
221            of the block that do not form a complete sample or frame they will be dropped.
222         */
223         int const total_samples = size / bytes_per_audio_sample (stream);
224         int const channels = stream->channels();
225         int const frames = total_samples / channels;
226         shared_ptr<AudioBuffers> audio (new AudioBuffers (channels, frames));
227         float** data = audio->data();
228
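        /* Interleaved formats (U8, S16, S32, FLT) have samples for all channels one after another
           in _frame->data[0]; planar formats (S16P, S32P, FLTP) have one plane per channel in
           _frame->data[i].
        */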
229         switch (audio_sample_format (stream)) {
230         case AV_SAMPLE_FMT_U8:
231         {
232                 uint8_t* p = reinterpret_cast<uint8_t *> (_frame->data[0]);
233                 int sample = 0;
234                 int channel = 0;
235                 for (int i = 0; i < total_samples; ++i) {
236                         data[channel][sample] = float(*p++) / (1 << 23);
237
238                         ++channel;
239                         if (channel == channels) {
240                                 channel = 0;
241                                 ++sample;
242                         }
243                 }
244         }
245         break;
246
247         case AV_SAMPLE_FMT_S16:
248         {
249                 int16_t* p = reinterpret_cast<int16_t *> (_frame->data[0]);
250                 int sample = 0;
251                 int channel = 0;
252                 for (int i = 0; i < total_samples; ++i) {
253                         data[channel][sample] = float(*p++) / (1 << 15);
254
255                         ++channel;
256                         if (channel == channels) {
257                                 channel = 0;
258                                 ++sample;
259                         }
260                 }
261         }
262         break;
263
264         case AV_SAMPLE_FMT_S16P:
265         {
266                 int16_t** p = reinterpret_cast<int16_t **> (_frame->data);
267                 for (int i = 0; i < channels; ++i) {
268                         for (int j = 0; j < frames; ++j) {
269                                 data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);
270                         }
271                 }
272         }
273         break;
274
275         case AV_SAMPLE_FMT_S32:
276         {
277                 int32_t* p = reinterpret_cast<int32_t *> (_frame->data[0]);
278                 int sample = 0;
279                 int channel = 0;
280                 for (int i = 0; i < total_samples; ++i) {
281                         data[channel][sample] = static_cast<float>(*p++) / 2147483648;
282
283                         ++channel;
284                         if (channel == channels) {
285                                 channel = 0;
286                                 ++sample;
287                         }
288                 }
289         }
290         break;
291
292         case AV_SAMPLE_FMT_S32P:
293         {
294                 int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
295                 for (int i = 0; i < channels; ++i) {
296                         for (int j = 0; j < frames; ++j) {
297                                 data[i][j] = static_cast<float>(p[i][j]) / 2147483648;
298                         }
299                 }
300         }
301         break;
302
303         case AV_SAMPLE_FMT_FLT:
304         {
305                 float* p = reinterpret_cast<float*> (_frame->data[0]);
306                 int sample = 0;
307                 int channel = 0;
308                 for (int i = 0; i < total_samples; ++i) {
309                         data[channel][sample] = *p++;
310
311                         ++channel;
312                         if (channel == channels) {
313                                 channel = 0;
314                                 ++sample;
315                         }
316                 }
317         }
318         break;
319
320         case AV_SAMPLE_FMT_FLTP:
321         {
322                 float** p = reinterpret_cast<float**> (_frame->data);
323                 DCPOMATIC_ASSERT (_frame->channels <= channels);
324                 /* Sometimes there aren't as many channels in the _frame as in the stream */
325                 for (int i = 0; i < _frame->channels; ++i) {
326                         memcpy (data[i], p[i], frames * sizeof(float));
327                 }
328                 for (int i = _frame->channels; i < channels; ++i) {
329                         audio->make_silent (i);
330                 }
331         }
332         break;
333
334         default:
335                 throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format (stream))));
336         }
337
338         return audio;
339 }
340
341 AVSampleFormat
342 FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
343 {
344 DCPOMATIC_DISABLE_WARNINGS
345         return stream->stream (_format_context)->codec->sample_fmt;
346 DCPOMATIC_ENABLE_WARNINGS
347 }
348
349 int
350 FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
351 {
352         return av_get_bytes_per_sample (audio_sample_format (stream));
353 }
354
355 void
356 FFmpegDecoder::seek (ContentTime time, bool accurate)
357 {
358         Decoder::seek (time, accurate);
359
360         /* If we are doing an `accurate' seek, we need to use pre-roll, as
361            we don't really know what the seek will give us.
362         */
363
364         ContentTime pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
365         time -= pre_roll;
366
367         /* XXX: it seems debatable whether PTS should be used here...
368            http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
369         */
370
371         optional<int> stream;
372
373         if (_video_stream) {
374                 stream = _video_stream;
375         } else {
376                 DCPOMATIC_ASSERT (_ffmpeg_content->audio);
377                 shared_ptr<FFmpegAudioStream> s = dynamic_pointer_cast<FFmpegAudioStream> (_ffmpeg_content->audio->stream ());
378                 if (s) {
379                         stream = s->index (_format_context);
380                 }
381         }
382
383         DCPOMATIC_ASSERT (stream);
384
385         ContentTime u = time - _pts_offset;
386         if (u < ContentTime ()) {
387                 u = ContentTime ();
388         }
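        /* Convert the seek target from seconds to the stream's timebase units; AVSEEK_FLAG_BACKWARD
           asks for the nearest seekable point at or before that time.
        */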
389         av_seek_frame (
390                 _format_context,
391                 stream.get(),
392                 u.seconds() / av_q2d (_format_context->streams[stream.get()]->time_base),
393                 AVSEEK_FLAG_BACKWARD
394                 );
395
396         {
397                 /* Force re-creation of filter graphs to reset them and hence to make sure
398                    they don't have any pre-seek frames knocking about.
399                 */
400                 boost::mutex::scoped_lock lm (_filter_graphs_mutex);
401                 _filter_graphs.clear ();
402         }
403
404         if (video_codec_context ()) {
405                 avcodec_flush_buffers (video_codec_context());
406         }
407
408 DCPOMATIC_DISABLE_WARNINGS
409         BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, ffmpeg_content()->ffmpeg_audio_streams()) {
410                 avcodec_flush_buffers (i->stream(_format_context)->codec);
411         }
412 DCPOMATIC_ENABLE_WARNINGS
413
414         if (subtitle_codec_context ()) {
415                 avcodec_flush_buffers (subtitle_codec_context ());
416         }
417
418         _have_current_subtitle = false;
419 }
420
421 void
422 FFmpegDecoder::decode_audio_packet ()
423 {
424         /* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4
425            several times.
426         */
427
428         AVPacket copy_packet = _packet;
429         int const stream_index = copy_packet.stream_index;
430
431         /* XXX: inefficient */
432         vector<shared_ptr<FFmpegAudioStream> > streams = ffmpeg_content()->ffmpeg_audio_streams ();
433         vector<shared_ptr<FFmpegAudioStream> >::const_iterator stream = streams.begin ();
434         while (stream != streams.end () && !(*stream)->uses_index (_format_context, stream_index)) {
435                 ++stream;
436         }
437
438         if (stream == streams.end ()) {
439                 /* The packet's stream may not be an audio one; just ignore it in this method if so */
440                 return;
441         }
442
443 DCPOMATIC_DISABLE_WARNINGS
444         while (copy_packet.size > 0) {
445
446                 int frame_finished;
447                 int decode_result = avcodec_decode_audio4 ((*stream)->stream (_format_context)->codec, _frame, &frame_finished, &copy_packet);
448                 if (decode_result < 0) {
449                         /* avcodec_decode_audio4 can sometimes return an error even though it has decoded
450                            some valid data; for example dca_subframe_footer can return AVERROR_INVALIDDATA
451                            if it overreads the auxiliary data.  ffplay carries on if frame_finished is true,
452                            even in the face of such an error, so I think we should too.
453
454                            Returning from the method here caused mantis #352.
455                         */
456                         LOG_WARNING ("avcodec_decode_audio4 failed (%1)", decode_result);
457
458                         /* Fudge decode_result so that we come out of the while loop when
459                            we've processed this data.
460                         */
461                         decode_result = copy_packet.size;
462                 }
463
464                 if (frame_finished) {
465                         shared_ptr<AudioBuffers> data = deinterleave_audio (*stream);
466
467                         ContentTime ct;
468                         if (_frame->pts == AV_NOPTS_VALUE && _next_time[stream_index]) {
469                                 /* In some streams not every frame comes through with a timestamp; for those
470                                    that have AV_NOPTS_VALUE we need to work out the timestamp ourselves.  This is
471                                    particularly noticeable with TrueHD streams (see #1111).
472                                 */
473                                 ct = *_next_time[stream_index];
474                         } else {
475                                 ct = ContentTime::from_seconds (
476                                         av_frame_get_best_effort_timestamp (_frame) *
477                                         av_q2d ((*stream)->stream (_format_context)->time_base))
478                                         + _pts_offset;
479                         }
480
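                        /* Note when the next frame on this stream ought to start, in case it
                           arrives without a timestamp.
                        */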
481                         _next_time[stream_index] = ct + ContentTime::from_frames(data->frames(), (*stream)->frame_rate());
482
483                         if (ct < ContentTime ()) {
484                                 /* Discard audio data that comes before time 0 */
485                                 Frame const remove = min (int64_t (data->frames()), (-ct).frames_ceil(double((*stream)->frame_rate ())));
486                                 data->move (data->frames() - remove, remove, 0);
487                                 data->set_frames (data->frames() - remove);
488                                 ct += ContentTime::from_frames (remove, (*stream)->frame_rate ());
489                         }
490
491                         if (ct < ContentTime()) {
492                                 LOG_WARNING (
493                                         "Crazy timestamp %1 for %2 samples in stream %3 packet pts %4 (ts=%5 tb=%6, off=%7)",
494                                         to_string(ct),
495                                         data->frames(),
496                                         copy_packet.stream_index,
497                                         copy_packet.pts,
498                                         av_frame_get_best_effort_timestamp(_frame),
499                                         av_q2d((*stream)->stream(_format_context)->time_base),
500                                         to_string(_pts_offset)
501                                         );
502                         }
503 DCPOMATIC_ENABLE_WARNINGS
504
505                         /* Emit this data, provided there is some and its time is sane */
506                         if (ct >= ContentTime() && data->frames() > 0) {
507                                 audio->emit (film(), *stream, data, ct);
508                         }
509                 }
510
511                 copy_packet.data += decode_result;
512                 copy_packet.size -= decode_result;
513         }
514 }
515
516 bool
517 FFmpegDecoder::decode_video_packet ()
518 {
519         DCPOMATIC_ASSERT (_video_stream);
520
521         int frame_finished;
522 DCPOMATIC_DISABLE_WARNINGS
523         if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
524                 return false;
525         }
526 DCPOMATIC_ENABLE_WARNINGS
527
528         boost::mutex::scoped_lock lm (_filter_graphs_mutex);
529
530         shared_ptr<VideoFilterGraph> graph;
531
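        /* Look for an existing filter graph that can handle this frame's size and pixel format;
           failing that, make a new one below.
        */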
532         list<shared_ptr<VideoFilterGraph> >::iterator i = _filter_graphs.begin();
533         while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
534                 ++i;
535         }
536
537         if (i == _filter_graphs.end ()) {
538                 dcp::Fraction vfr (lrint(_ffmpeg_content->video_frame_rate().get() * 1000), 1000);
539                 graph.reset (new VideoFilterGraph (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format, vfr));
540                 graph->setup (_ffmpeg_content->filters ());
541                 _filter_graphs.push_back (graph);
542                 LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
543         } else {
544                 graph = *i;
545         }
546
547         list<pair<shared_ptr<Image>, int64_t> > images = graph->process (_frame);
548
549         for (list<pair<shared_ptr<Image>, int64_t> >::iterator i = images.begin(); i != images.end(); ++i) {
550
551                 shared_ptr<Image> image = i->first;
552
553                 if (i->second != AV_NOPTS_VALUE) {
554                         double const pts = i->second * av_q2d (_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds ();
555
556                         video->emit (
557                                 film(),
558                                 shared_ptr<ImageProxy> (new RawImageProxy (image)),
559                                 llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
560                                 );
561                 } else {
562                         LOG_WARNING_NC ("Dropping frame without PTS");
563                 }
564         }
565
566         return true;
567 }
568
569 void
570 FFmpegDecoder::decode_subtitle_packet ()
571 {
572         int got_subtitle;
573         AVSubtitle sub;
574         if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, &_packet) < 0 || !got_subtitle) {
575                 return;
576         }
577
578         /* Stop any current subtitle, either at the time it was supposed to stop, or now if now is sooner */
579         if (_have_current_subtitle) {
580                 if (_current_subtitle_to) {
581                         only_text()->emit_stop (min(*_current_subtitle_to, subtitle_period(sub).from + _pts_offset));
582                 } else {
583                         only_text()->emit_stop (subtitle_period(sub).from + _pts_offset);
584                 }
585                 _have_current_subtitle = false;
586         }
587
588         if (sub.num_rects <= 0) {
589                 /* Nothing new in this subtitle */
590                 return;
591         }
592
593         /* Subtitle PTS (within the source, not taking into account any of the
594            source that we may have chopped off for the DCP).
595         */
596         FFmpegSubtitlePeriod sub_period = subtitle_period (sub);
597         ContentTime from;
598         from = sub_period.from + _pts_offset;
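        /* If the packet gives us the time this subtitle stops we can use it directly; otherwise the
           stop will come from a later subtitle packet, so note that we have a subtitle outstanding.
        */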
599         if (sub_period.to) {
600                 _current_subtitle_to = *sub_period.to + _pts_offset;
601         } else {
602                 _current_subtitle_to = optional<ContentTime>();
603                 _have_current_subtitle = true;
604         }
605
606         for (unsigned int i = 0; i < sub.num_rects; ++i) {
607                 AVSubtitleRect const * rect = sub.rects[i];
608
609                 switch (rect->type) {
610                 case SUBTITLE_NONE:
611                         break;
612                 case SUBTITLE_BITMAP:
613                         decode_bitmap_subtitle (rect, from);
614                         break;
615                 case SUBTITLE_TEXT:
616                         cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
617                         break;
618                 case SUBTITLE_ASS:
619                         decode_ass_subtitle (rect->ass, from);
620                         break;
621                 }
622         }
623
624         if (_current_subtitle_to) {
625                 only_text()->emit_stop (*_current_subtitle_to);
626         }
627
628         avsubtitle_free (&sub);
629 }
630
631 void
632 FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime from)
633 {
634         /* Note BGRA is expressed little-endian, so the first byte in the word is B, second
635            G, third R, fourth A.
636         */
637         shared_ptr<Image> image (new Image (AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), true));
638
639 #ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
640         /* Start of the first line in the subtitle */
641         uint8_t* sub_p = rect->pict.data[0];
642         /* sub_p looks up into a BGRA palette which is at rect->pict.data[1];
643            (i.e. first byte B, second G, third R, fourth A)
644         */
645         uint8_t const * palette = rect->pict.data[1];
646 #else
647         /* Start of the first line in the subtitle */
648         uint8_t* sub_p = rect->data[0];
649         /* sub_p looks up into a BGRA palette which is at rect->data[1].
650            (first byte B, second G, third R, fourth A)
651         */
652         uint8_t const * palette = rect->data[1];
653 #endif
654         /* And the stream has a map of those palette colours to colours
655            chosen by the user; create a `mapped' palette from those settings.
656         */
657         map<RGBA, RGBA> colour_map = ffmpeg_content()->subtitle_stream()->colours ();
658         vector<RGBA> mapped_palette (rect->nb_colors);
659         for (int i = 0; i < rect->nb_colors; ++i) {
660                 RGBA c (palette[2], palette[1], palette[0], palette[3]);
661                 map<RGBA, RGBA>::const_iterator j = colour_map.find (c);
662                 if (j != colour_map.end ()) {
663                         mapped_palette[i] = j->second;
664                 } else {
665                         /* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
666                            it is from a project that was created before this stuff was added.  Just use the
667                            colour straight from the original palette.
668                         */
669                         mapped_palette[i] = c;
670                 }
671                 palette += 4;
672         }
673
674         /* Start of the output data */
675         uint8_t* out_p = image->data()[0];
676
677         for (int y = 0; y < rect->h; ++y) {
678                 uint8_t* sub_line_p = sub_p;
679                 uint8_t* out_line_p = out_p;
680                 for (int x = 0; x < rect->w; ++x) {
681                         RGBA const p = mapped_palette[*sub_line_p++];
682                         *out_line_p++ = p.b;
683                         *out_line_p++ = p.g;
684                         *out_line_p++ = p.r;
685                         *out_line_p++ = p.a;
686                 }
687 #ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
688                 sub_p += rect->pict.linesize[0];
689 #else
690                 sub_p += rect->linesize[0];
691 #endif
692                 out_p += image->stride()[0];
693         }
694
695         int target_width = subtitle_codec_context()->width;
696         if (target_width == 0 && video_codec_context()) {
697                 /* subtitle_codec_context()->width == 0 has been seen in the wild but I don't
698                    know if it's supposed to mean something from FFmpeg's point of view.
699                 */
700                 target_width = video_codec_context()->width;
701         }
702         int target_height = subtitle_codec_context()->height;
703         if (target_height == 0 && video_codec_context()) {
704                 target_height = video_codec_context()->height;
705         }
706         DCPOMATIC_ASSERT (target_width);
707         DCPOMATIC_ASSERT (target_height);
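        /* Express the subtitle rectangle's position and size as proportions of the frame rather than in pixels */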
708         dcpomatic::Rect<double> const scaled_rect (
709                 static_cast<double> (rect->x) / target_width,
710                 static_cast<double> (rect->y) / target_height,
711                 static_cast<double> (rect->w) / target_width,
712                 static_cast<double> (rect->h) / target_height
713                 );
714
715         only_text()->emit_bitmap_start (from, image, scaled_rect);
716 }
717
718 void
719 FFmpegDecoder::decode_ass_subtitle (string ass, ContentTime from)
720 {
721         /* We have no styles and no Format: line, so I'm assuming that FFmpeg
722            produces a single format of Dialogue: lines...
723         */
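        /* e.g. (assuming the usual ASS field order) a line might look like
           Dialogue: 0,0:00:01.00,0:00:04.00,Default,,0,0,0,,Hello world
           and everything after the ninth comma is the text we want.
        */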
724
725         int commas = 0;
726         string text;
727         for (size_t i = 0; i < ass.length(); ++i) {
728                 if (commas < 9 && ass[i] == ',') {
729                         ++commas;
730                 } else if (commas == 9) {
731                         text += ass[i];
732                 }
733         }
734
735         if (text.empty ()) {
736                 return;
737         }
738
739         sub::RawSubtitle base;
740         list<sub::RawSubtitle> raw = sub::SSAReader::parse_line (
741                 base,
742                 text,
743                 _ffmpeg_content->video->size().width,
744                 _ffmpeg_content->video->size().height
745                 );
746
747         BOOST_FOREACH (sub::Subtitle const & i, sub::collect<list<sub::Subtitle> > (raw)) {
748                 only_text()->emit_plain_start (from, i);
749         }
750 }