/*
    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/

/** @file  src/ffmpeg_decoder.cc
 *  @brief A decoder using FFmpeg to decode content.
 */

#include "filter.h"
#include "exceptions.h"
#include "image.h"
#include "util.h"
#include "log.h"
#include "dcpomatic_log.h"
#include "ffmpeg_decoder.h"
#include "text_decoder.h"
#include "ffmpeg_audio_stream.h"
#include "ffmpeg_subtitle_stream.h"
#include "video_filter_graph.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "raw_image_proxy.h"
#include "video_decoder.h"
#include "film.h"
#include "audio_decoder.h"
#include "compose.hpp"
#include "text_content.h"
#include "audio_content.h"
#include "frame_interval_checker.h"
#include <dcp/subtitle_string.h>
#include <sub/ssa_reader.h>
#include <sub/subtitle.h>
#include <sub/collect.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <boost/foreach.hpp>
#include <boost/algorithm/string.hpp>
#include <vector>
#include <iomanip>
#include <iostream>
#include <stdint.h>

#include "i18n.h"

using std::cout;
using std::string;
using std::vector;
using std::list;
using std::min;
using std::pair;
using std::max;
using std::map;
using boost::shared_ptr;
using boost::is_any_of;
using boost::split;
using boost::optional;
using boost::dynamic_pointer_cast;
using dcp::Size;
using namespace dcpomatic;

FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
        : FFmpeg (c)
        , Decoder (film)
        , _have_current_subtitle (false)
{
        if (c->video && c->video->use()) {
                video.reset (new VideoDecoder (this, c));
                _pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
                /* It doesn't matter what size or pixel format this is, it just needs to be black */
                _black_image.reset (new Image (AV_PIX_FMT_RGB24, dcp::Size (128, 128), true));
                _black_image->make_black ();
        } else {
                _pts_offset = ContentTime ();
        }

        if (c->audio) {
                audio.reset (new AudioDecoder (this, c->audio, fast));
        }

        if (c->only_text()) {
                /* XXX: this time here should be the time of the first subtitle, not 0 */
                text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, c->only_text(), ContentTime())));
        }

        _next_time.resize (_format_context->nb_streams);
}

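/** Flush any remaining data out of the decoders, then pad every stream with
 *  black video / silence up to the rounded-up length of the content.
 */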
void
FFmpegDecoder::flush ()
{
        /* Get any remaining frames */

        _packet.data = 0;
        _packet.size = 0;

        /* XXX: should we reset _packet.data and size after each *_decode_* call? */

        while (video && decode_video_packet()) {}

        if (audio) {
                decode_audio_packet ();
        }

        /* Make sure all streams are the same length and round up to the next video frame */

        FrameRateChange const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
        ContentTime full_length (_ffmpeg_content->full_length(film()), frc);
        full_length = full_length.ceil (frc.source);
        if (video) {
                double const vfr = _ffmpeg_content->video_frame_rate().get();
                Frame const f = full_length.frames_round (vfr);
                Frame v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
                while (v < f) {
                        video->emit (film(), shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)), v);
                        ++v;
                }
        }

        BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, _ffmpeg_content->ffmpeg_audio_streams ()) {
                ContentTime a = audio->stream_position(film(), i);
                /* Unfortunately if a is 0 that really means that we don't know the stream position since
                   there has been no data on it since the last seek.  In this case we'll just do nothing
                   here.  I'm not sure if that's the right idea.
                */
                if (a > ContentTime()) {
                        while (a < full_length) {
                                ContentTime to_do = min (full_length - a, ContentTime::from_seconds (0.1));
                                shared_ptr<AudioBuffers> silence (new AudioBuffers (i->channels(), to_do.frames_ceil (i->frame_rate())));
                                silence->make_silent ();
                                audio->emit (film(), i, silence, a);
                                a += to_do;
                        }
                }
        }

        if (audio) {
                audio->flush ();
        }
}

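/** Read one packet from the content and decode it, flushing at end-of-file.
 *  @return true if decoding is finished, otherwise false.
 */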
bool
FFmpegDecoder::pass ()
{
#ifdef DCPOMATIC_VARIANT_SWAROOP
        if (_ffmpeg_content->encrypted() && !_ffmpeg_content->kdm()) {
                return true;
        }
#endif

        int r = av_read_frame (_format_context, &_packet);

        /* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
           has pretty much succeeded (and hence generated data which should be processed).
           Hence it makes sense to continue here in that case.
        */
        if (r < 0 && r != AVERROR_INVALIDDATA) {
                if (r != AVERROR_EOF) {
                        /* Maybe we should fail here, but for now we'll just finish off instead */
                        char buf[256];
                        av_strerror (r, buf, sizeof(buf));
                        LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), &buf[0], r);
                }

                flush ();
                return true;
        }

        int const si = _packet.stream_index;
        shared_ptr<const FFmpegContent> fc = _ffmpeg_content;

        if (_video_stream && si == _video_stream.get() && video && !video->ignore()) {
                decode_video_packet ();
        } else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
                decode_subtitle_packet ();
        } else {
                decode_audio_packet ();
        }

        av_packet_unref (&_packet);
        return false;
}

/** Deinterleave the audio data in _frame and convert it to float.
 *  Only the first data plane is used for non-planar formats; otherwise there is one plane per channel.
 */
shared_ptr<AudioBuffers>
FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
{
        DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));

        int const size = av_samples_get_buffer_size (
                0, stream->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (stream), 1
                );

        /* Deinterleave and convert to float */

        /* total_samples and frames will be rounded down here, so if there are stray samples at the end
           of the block that do not form a complete sample or frame they will be dropped.
        */
        int const total_samples = size / bytes_per_audio_sample (stream);
        int const channels = stream->channels();
        int const frames = total_samples / channels;
        shared_ptr<AudioBuffers> audio (new AudioBuffers (channels, frames));
        float** data = audio->data();

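        /* Interleaved formats (U8, S16, S32, FLT) carry every channel in _frame->data[0];
           planar formats (S16P, S32P, FLTP) keep one plane per channel in _frame->data[i].
        */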
        switch (audio_sample_format (stream)) {
        case AV_SAMPLE_FMT_U8:
        {
                uint8_t* p = reinterpret_cast<uint8_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        /* U8 samples are unsigned and biased about 128, so remove the
                           bias before scaling to the range [-1, 1)
                        */
                        data[channel][sample] = (float(*p++) - 128) / 128;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16:
        {
                int16_t* p = reinterpret_cast<int16_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = float(*p++) / (1 << 15);

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16P:
        {
                int16_t** p = reinterpret_cast<int16_t **> (_frame->data);
                for (int i = 0; i < channels; ++i) {
                        for (int j = 0; j < frames; ++j) {
                                data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32:
        {
                int32_t* p = reinterpret_cast<int32_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = static_cast<float>(*p++) / 2147483648;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32P:
        {
                int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
                for (int i = 0; i < channels; ++i) {
                        for (int j = 0; j < frames; ++j) {
                                data[i][j] = static_cast<float>(p[i][j]) / 2147483648;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLT:
        {
                float* p = reinterpret_cast<float*> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = *p++;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLTP:
        {
                float** p = reinterpret_cast<float**> (_frame->data);
                DCPOMATIC_ASSERT (_frame->channels <= channels);
                /* Sometimes there aren't as many channels in the _frame as in the stream */
                for (int i = 0; i < _frame->channels; ++i) {
                        memcpy (data[i], p[i], frames * sizeof(float));
                }
                for (int i = _frame->channels; i < channels; ++i) {
                        audio->make_silent (i);
                }
        }
        break;

        default:
                throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format (stream))));
        }

        return audio;
}

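/** @return the AVSampleFormat of the data in a given audio stream */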
AVSampleFormat
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
{
        return stream->stream (_format_context)->codec->sample_fmt;
}

int
FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
{
        return av_get_bytes_per_sample (audio_sample_format (stream));
}

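/** Seek the content so that decoding will continue from a given time.
 *  @param time Time to seek to.
 *  @param accurate true to seek with some pre-roll, so that the exact frame
 *  can then be found by decoding forwards from the seek point.
 */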
void
FFmpegDecoder::seek (ContentTime time, bool accurate)
{
        Decoder::seek (time, accurate);

        /* If we are doing an `accurate' seek, we need to use pre-roll, as
           we don't really know what the seek will give us.
        */

        ContentTime pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
        time -= pre_roll;

        /* XXX: it seems debatable whether PTS should be used here...
           http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
        */

        optional<int> stream;

        if (_video_stream) {
                stream = _video_stream;
        } else {
                DCPOMATIC_ASSERT (_ffmpeg_content->audio);
                shared_ptr<FFmpegAudioStream> s = dynamic_pointer_cast<FFmpegAudioStream> (_ffmpeg_content->audio->stream ());
                if (s) {
                        stream = s->index (_format_context);
                }
        }

        DCPOMATIC_ASSERT (stream);

        ContentTime u = time - _pts_offset;
        if (u < ContentTime ()) {
                u = ContentTime ();
        }
        av_seek_frame (
                _format_context,
                stream.get(),
                u.seconds() / av_q2d (_format_context->streams[stream.get()]->time_base),
                AVSEEK_FLAG_BACKWARD
                );

        {
                /* Force re-creation of filter graphs to reset them and hence to make sure
                   they don't have any pre-seek frames knocking about.
                */
                boost::mutex::scoped_lock lm (_filter_graphs_mutex);
                _filter_graphs.clear ();
        }

        if (video_codec_context ()) {
                avcodec_flush_buffers (video_codec_context());
        }

        BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, ffmpeg_content()->ffmpeg_audio_streams()) {
                avcodec_flush_buffers (i->stream(_format_context)->codec);
        }

        if (subtitle_codec_context ()) {
                avcodec_flush_buffers (subtitle_codec_context ());
        }

        _have_current_subtitle = false;
}

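/** Decode the audio frame(s) in the current packet, if the packet belongs to one of
 *  our audio streams, and emit the result; timestamps come from the frame PTS, or from
 *  a running per-stream count when the stream does not timestamp every frame.
 */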
void
FFmpegDecoder::decode_audio_packet ()
{
        /* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4
           several times.
        */

        AVPacket copy_packet = _packet;
        int const stream_index = copy_packet.stream_index;

        /* XXX: inefficient */
        vector<shared_ptr<FFmpegAudioStream> > streams = ffmpeg_content()->ffmpeg_audio_streams ();
        vector<shared_ptr<FFmpegAudioStream> >::const_iterator stream = streams.begin ();
        while (stream != streams.end () && !(*stream)->uses_index (_format_context, stream_index)) {
                ++stream;
        }

        if (stream == streams.end ()) {
                /* The packet's stream may not be an audio one; just ignore it in this method if so */
                return;
        }

        while (copy_packet.size > 0) {

                int frame_finished;
                int decode_result = avcodec_decode_audio4 ((*stream)->stream (_format_context)->codec, _frame, &frame_finished, &copy_packet);
                if (decode_result < 0) {
                        /* avcodec_decode_audio4 can sometimes return an error even though it has decoded
                           some valid data; for example dca_subframe_footer can return AVERROR_INVALIDDATA
                           if it overreads the auxiliary data.  ffplay carries on if frame_finished is true,
                           even in the face of such an error, so I think we should too.

                           Returning from the method here caused mantis #352.
                        */
                        LOG_WARNING ("avcodec_decode_audio4 failed (%1)", decode_result);

                        /* Fudge decode_result so that we come out of the while loop when
                           we've processed this data.
                        */
                        decode_result = copy_packet.size;
                }

                if (frame_finished) {
                        shared_ptr<AudioBuffers> data = deinterleave_audio (*stream);

                        ContentTime ct;
                        if (_frame->pts == AV_NOPTS_VALUE && _next_time[stream_index]) {
                                /* In some streams we see not every frame coming through with a timestamp; for those
                                   that have AV_NOPTS_VALUE we need to work out the timestamp ourselves.  This is
                                   particularly noticeable with TrueHD streams (see #1111).
                                */
                                ct = *_next_time[stream_index];
                        } else {
                                ct = ContentTime::from_seconds (
                                        av_frame_get_best_effort_timestamp (_frame) *
                                        av_q2d ((*stream)->stream (_format_context)->time_base))
                                        + _pts_offset;
                        }

                        _next_time[stream_index] = ct + ContentTime::from_frames(data->frames(), (*stream)->frame_rate());

                        if (ct < ContentTime ()) {
                                /* Discard audio data that comes before time 0 */
                                Frame const remove = min (int64_t (data->frames()), (-ct).frames_ceil(double((*stream)->frame_rate ())));
                                data->move (data->frames() - remove, remove, 0);
                                data->set_frames (data->frames() - remove);
                                ct += ContentTime::from_frames (remove, (*stream)->frame_rate ());
                        }

                        if (ct < ContentTime()) {
                                LOG_WARNING (
                                        "Crazy timestamp %1 for %2 samples in stream %3 packet pts %4 (ts=%5 tb=%6, off=%7)",
                                        to_string(ct),
                                        data->frames(),
                                        copy_packet.stream_index,
                                        copy_packet.pts,
                                        av_frame_get_best_effort_timestamp(_frame),
                                        av_q2d((*stream)->stream(_format_context)->time_base),
                                        to_string(_pts_offset)
                                        );
                        }

                        /* Give this data provided there is some, and its time is sane */
                        if (ct >= ContentTime() && data->frames() > 0) {
                                audio->emit (film(), *stream, data, ct);
                        }
                }

                copy_packet.data += decode_result;
                copy_packet.size -= decode_result;
        }
}

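/** Decode the current packet as video, passing the result through the appropriate
 *  filter graph and emitting the filtered image(s).
 *  @return true if a frame was decoded, otherwise false.
 */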
bool
FFmpegDecoder::decode_video_packet ()
{
        DCPOMATIC_ASSERT (_video_stream);

        int frame_finished;
        if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
                return false;
        }

        boost::mutex::scoped_lock lm (_filter_graphs_mutex);

        shared_ptr<VideoFilterGraph> graph;

        list<shared_ptr<VideoFilterGraph> >::iterator i = _filter_graphs.begin();
        while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
                ++i;
        }

        if (i == _filter_graphs.end ()) {
                dcp::Fraction vfr (lrint(_ffmpeg_content->video_frame_rate().get() * 1000), 1000);
                graph.reset (new VideoFilterGraph (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format, vfr));
                graph->setup (_ffmpeg_content->filters ());
                _filter_graphs.push_back (graph);
                LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
        } else {
                graph = *i;
        }

        list<pair<shared_ptr<Image>, int64_t> > images = graph->process (_frame);

        for (list<pair<shared_ptr<Image>, int64_t> >::iterator i = images.begin(); i != images.end(); ++i) {

                shared_ptr<Image> image = i->first;

                if (i->second != AV_NOPTS_VALUE) {
                        double const pts = i->second * av_q2d (_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds ();

                        video->emit (
                                film(),
                                shared_ptr<ImageProxy> (new RawImageProxy (image)),
                                llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
                                );
                } else {
                        LOG_WARNING_NC ("Dropping frame without PTS");
                }
        }

        return true;
}

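/** Decode the current packet as a subtitle: stop any subtitle that is currently
 *  showing, then emit whatever new bitmap or ASS content the packet contains.
 */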
void
FFmpegDecoder::decode_subtitle_packet ()
{
        int got_subtitle;
        AVSubtitle sub;
        if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, &_packet) < 0 || !got_subtitle) {
                return;
        }

        /* Stop any current subtitle, either at the time it was supposed to stop, or now if now is sooner */
        if (_have_current_subtitle) {
                if (_current_subtitle_to) {
                        only_text()->emit_stop (min(*_current_subtitle_to, subtitle_period(sub).from + _pts_offset));
                } else {
                        only_text()->emit_stop (subtitle_period(sub).from + _pts_offset);
                }
                _have_current_subtitle = false;
        }

        if (sub.num_rects <= 0) {
                /* Nothing new in this subtitle; free it before returning so that it does not leak */
                avsubtitle_free (&sub);
                return;
        }

        /* Subtitle PTS (within the source, not taking into account any of the
           source that we may have chopped off for the DCP).
        */
        FFmpegSubtitlePeriod sub_period = subtitle_period (sub);
        ContentTime from = sub_period.from + _pts_offset;
        if (sub_period.to) {
                _current_subtitle_to = *sub_period.to + _pts_offset;
        } else {
                _current_subtitle_to = optional<ContentTime>();
                _have_current_subtitle = true;
        }

        for (unsigned int i = 0; i < sub.num_rects; ++i) {
                AVSubtitleRect const * rect = sub.rects[i];

                switch (rect->type) {
                case SUBTITLE_NONE:
                        break;
                case SUBTITLE_BITMAP:
                        decode_bitmap_subtitle (rect, from);
                        break;
                case SUBTITLE_TEXT:
                        cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
                        break;
                case SUBTITLE_ASS:
                        decode_ass_subtitle (rect->ass, from);
                        break;
                }
        }

        if (_current_subtitle_to) {
                only_text()->emit_stop (*_current_subtitle_to);
        }

        avsubtitle_free (&sub);
}

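/** Convert a paletted bitmap subtitle rectangle to a BGRA image, applying any colour
 *  re-mapping that the subtitle stream specifies, and emit it.
 *  @param rect Rectangle from FFmpeg.
 *  @param from Time at which the subtitle should appear.
 */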
void
FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime from)
{
        /* Note BGRA is expressed little-endian, so the first byte in the word is B, second
           G, third R, fourth A.
        */
        shared_ptr<Image> image (new Image (AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), true));

#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
        /* Start of the first line in the subtitle */
        uint8_t* sub_p = rect->pict.data[0];
        /* sub_p looks up into a BGRA palette which is at rect->pict.data[1]
           (i.e. first byte B, second G, third R, fourth A)
        */
        uint8_t const * palette = rect->pict.data[1];
#else
        /* Start of the first line in the subtitle */
        uint8_t* sub_p = rect->data[0];
        /* sub_p looks up into a BGRA palette which is at rect->data[1]
           (first byte B, second G, third R, fourth A)
        */
        uint8_t const * palette = rect->data[1];
#endif
        /* And the stream has a map of those palette colours to colours
           chosen by the user; create a `mapped' palette from those settings.
        */
        map<RGBA, RGBA> colour_map = ffmpeg_content()->subtitle_stream()->colours ();
        vector<RGBA> mapped_palette (rect->nb_colors);
        for (int i = 0; i < rect->nb_colors; ++i) {
                RGBA c (palette[2], palette[1], palette[0], palette[3]);
                map<RGBA, RGBA>::const_iterator j = colour_map.find (c);
                if (j != colour_map.end ()) {
                        mapped_palette[i] = j->second;
                } else {
                        /* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
                           it is from a project that was created before this stuff was added.  Just use the
                           colour straight from the original palette.
                        */
                        mapped_palette[i] = c;
                }
                palette += 4;
        }

        /* Start of the output data */
        uint8_t* out_p = image->data()[0];

        for (int y = 0; y < rect->h; ++y) {
                uint8_t* sub_line_p = sub_p;
                uint8_t* out_line_p = out_p;
                for (int x = 0; x < rect->w; ++x) {
                        RGBA const p = mapped_palette[*sub_line_p++];
                        *out_line_p++ = p.b;
                        *out_line_p++ = p.g;
                        *out_line_p++ = p.r;
                        *out_line_p++ = p.a;
                }
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
                sub_p += rect->pict.linesize[0];
#else
                sub_p += rect->linesize[0];
#endif
                out_p += image->stride()[0];
        }

        int target_width = subtitle_codec_context()->width;
        if (target_width == 0 && video_codec_context()) {
                /* subtitle_codec_context()->width == 0 has been seen in the wild but I don't
                   know if it's supposed to mean something from FFmpeg's point of view.
                */
                target_width = video_codec_context()->width;
        }
        int target_height = subtitle_codec_context()->height;
        if (target_height == 0 && video_codec_context()) {
                target_height = video_codec_context()->height;
        }
        DCPOMATIC_ASSERT (target_width);
        DCPOMATIC_ASSERT (target_height);
        dcpomatic::Rect<double> const scaled_rect (
                static_cast<double> (rect->x) / target_width,
                static_cast<double> (rect->y) / target_height,
                static_cast<double> (rect->w) / target_width,
                static_cast<double> (rect->h) / target_height
                );

        only_text()->emit_bitmap_start (from, image, scaled_rect);
}

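/** Decode an ASS subtitle: take the text after the ninth comma of what is assumed
 *  to be a fixed-format Dialogue: line (see the comment below), parse it and emit
 *  the result.
 *  @param ass Payload from FFmpeg.
 *  @param from Time at which the subtitle should appear.
 */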
void
FFmpegDecoder::decode_ass_subtitle (string ass, ContentTime from)
{
        /* We have no styles and no Format: line, so I'm assuming that FFmpeg
           produces a single format of Dialogue: lines...
        */

        int commas = 0;
        string text;
        for (size_t i = 0; i < ass.length(); ++i) {
                if (commas < 9 && ass[i] == ',') {
                        ++commas;
                } else if (commas == 9) {
                        text += ass[i];
                }
        }

        if (text.empty ()) {
                return;
        }

        sub::RawSubtitle base;
        list<sub::RawSubtitle> raw = sub::SSAReader::parse_line (
                base,
                text,
                _ffmpeg_content->video->size().width,
                _ffmpeg_content->video->size().height
                );

        BOOST_FOREACH (sub::Subtitle const & i, sub::collect<list<sub::Subtitle> > (raw)) {
                only_text()->emit_plain_start (from, i);
        }
}