/*
    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/

/** @file  src/ffmpeg_decoder.cc
 *  @brief A decoder using FFmpeg to decode content.
 */

#include "filter.h"
#include "exceptions.h"
#include "image.h"
#include "util.h"
#include "log.h"
#include "dcpomatic_log.h"
#include "ffmpeg_decoder.h"
#include "text_decoder.h"
#include "ffmpeg_audio_stream.h"
#include "ffmpeg_subtitle_stream.h"
#include "video_filter_graph.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "raw_image_proxy.h"
#include "video_decoder.h"
#include "film.h"
#include "audio_decoder.h"
#include "compose.hpp"
#include "text_content.h"
#include "audio_content.h"
#include "frame_interval_checker.h"
#include <dcp/subtitle_string.h>
#include <sub/ssa_reader.h>
#include <sub/subtitle.h>
#include <sub/collect.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <boost/foreach.hpp>
#include <boost/algorithm/string.hpp>
#include <vector>
#include <iomanip>
#include <iostream>
#include <stdint.h>

#include "i18n.h"


using std::cout;
using std::string;
using std::vector;
using std::list;
using std::min;
using std::pair;
using std::max;
using std::map;
using boost::shared_ptr;
using boost::is_any_of;
using boost::split;
using boost::optional;
using boost::dynamic_pointer_cast;
using dcp::Size;

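/** @param film Film that the content is part of.
 *  @param c Content to decode.
 *  @param fast Passed on to our AudioDecoder.
 */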
FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
        : FFmpeg (c)
        , Decoder (film)
        , _have_current_subtitle (false)
{
        if (c->video) {
                video.reset (new VideoDecoder (this, c));
                _pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
                /* It doesn't matter what size or pixel format this is; it just needs to be black */
                _black_image.reset (new Image (AV_PIX_FMT_RGB24, dcp::Size (128, 128), true));
                _black_image->make_black ();
        } else {
                _pts_offset = ContentTime ();
        }

        if (c->audio) {
                audio.reset (new AudioDecoder (this, c->audio, fast));
        }

        if (c->only_text()) {
                /* XXX: this time here should be the time of the first subtitle, not 0 */
                text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, c->only_text(), ContentTime())));
        }

        _next_time.resize (_format_context->nb_streams);
}

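/** Flush any remaining data out of the decoders, then emit enough black video
 *  and silence to pad every stream up to the full length of the content.
 */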
void
FFmpegDecoder::flush ()
{
        /* Get any remaining frames */

        _packet.data = 0;
        _packet.size = 0;
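        /* This null packet (data == 0, size == 0) puts the decoders into
           "draining" mode, so that they return any frames they have buffered
           internally (e.g. for re-ordering).
        */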

        /* XXX: should we reset _packet.data and size after each *_decode_* call? */

        while (video && decode_video_packet()) {}

        if (audio) {
                decode_audio_packet ();
        }

        /* Make sure all streams are the same length and round up to the next video frame */

        FrameRateChange const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
        ContentTime full_length (_ffmpeg_content->full_length(film()), frc);
        full_length = full_length.ceil (frc.source);
        if (video) {
                double const vfr = _ffmpeg_content->video_frame_rate().get();
                Frame const f = full_length.frames_round (vfr);
                Frame v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
                while (v < f) {
                        video->emit (film(), shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)), v);
                        ++v;
                }
        }

        BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, _ffmpeg_content->ffmpeg_audio_streams ()) {
                ContentTime a = audio->stream_position(film(), i);
                /* Unfortunately if a is 0 that really means that we don't know the stream position since
                   there has been no data on it since the last seek.  In this case we'll just do nothing
                   here.  I'm not sure if that's the right idea.
                */
                if (a > ContentTime()) {
                        while (a < full_length) {
                                ContentTime to_do = min (full_length - a, ContentTime::from_seconds (0.1));
                                shared_ptr<AudioBuffers> silence (new AudioBuffers (i->channels(), to_do.frames_ceil (i->frame_rate())));
                                silence->make_silent ();
                                audio->emit (film(), i, silence, a);
                                a += to_do;
                        }
                }
        }

        if (audio) {
                audio->flush ();
        }
}

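/** Read and decode the next packet from the content.
 *  @return true if the end of the content has been reached and there is nothing
 *  more to decode, otherwise false.
 */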
bool
FFmpegDecoder::pass ()
{
        int r = av_read_frame (_format_context, &_packet);

        /* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
           has pretty-much succeeded (and hence generated data which should be processed).
           Hence it makes sense to continue here in that case.
        */
        if (r < 0 && r != AVERROR_INVALIDDATA) {
                if (r != AVERROR_EOF) {
                        /* Maybe we should fail here, but for now we'll just finish off instead */
                        char buf[256];
                        av_strerror (r, buf, sizeof(buf));
                        LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), &buf[0], r);
                }

                flush ();
                return true;
        }

        int const si = _packet.stream_index;
        shared_ptr<const FFmpegContent> fc = _ffmpeg_content;

        if (_video_stream && si == _video_stream.get() && !video->ignore()) {
                decode_video_packet ();
        } else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
                decode_subtitle_packet ();
        } else {
                decode_audio_packet ();
        }

        av_packet_unref (&_packet);
        return false;
}

/** Deinterleave audio data from _frame and convert it to float.
 *  _frame->data contains a single interleaved buffer for non-planar formats,
 *  otherwise one buffer per channel.
 *  @param stream Stream that the frame's data belongs to.
 */
shared_ptr<AudioBuffers>
FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
{
        DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));

        int const size = av_samples_get_buffer_size (
                0, stream->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (stream), 1
                );

        /* Deinterleave and convert to float */

        /* total_samples and frames will be rounded down here, so if there are stray samples at the end
           of the block that do not form a complete sample or frame they will be dropped.
        */
        int const total_samples = size / bytes_per_audio_sample (stream);
        int const channels = stream->channels();
        int const frames = total_samples / channels;
        shared_ptr<AudioBuffers> audio (new AudioBuffers (channels, frames));
        float** data = audio->data();

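        /* In each case below integer samples are converted to floats in [-1, 1)
           by dividing by 2^(bits - 1) (with unsigned U8 re-centred around zero
           first).  Interleaved formats (L0 R0 L1 R1 ...) are distributed
           round-robin across the output channels; planar formats already have
           one plane per channel.
        */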
        switch (audio_sample_format (stream)) {
        case AV_SAMPLE_FMT_U8:
        {
                uint8_t* p = reinterpret_cast<uint8_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        /* U8 is unsigned with silence at 128, so re-centre before scaling */
                        data[channel][sample] = (static_cast<float>(*p++) - 128) / 128;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16:
        {
                int16_t* p = reinterpret_cast<int16_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = float(*p++) / (1 << 15);

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S16P:
        {
                int16_t** p = reinterpret_cast<int16_t **> (_frame->data);
                for (int i = 0; i < channels; ++i) {
                        for (int j = 0; j < frames; ++j) {
                                data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32:
        {
                int32_t* p = reinterpret_cast<int32_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = static_cast<float>(*p++) / 2147483648;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_S32P:
        {
                int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
                for (int i = 0; i < channels; ++i) {
                        for (int j = 0; j < frames; ++j) {
                                data[i][j] = static_cast<float>(p[i][j]) / 2147483648;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLT:
        {
                float* p = reinterpret_cast<float*> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
                        data[channel][sample] = *p++;

                        ++channel;
                        if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
                }
        }
        break;

        case AV_SAMPLE_FMT_FLTP:
        {
                float** p = reinterpret_cast<float**> (_frame->data);
                /* Sometimes there aren't as many channels in the _frame as in the stream */
                for (int i = 0; i < _frame->channels; ++i) {
                        memcpy (data[i], p[i], frames * sizeof(float));
                }
                for (int i = _frame->channels; i < channels; ++i) {
                        audio->make_silent (i);
                }
        }
        break;

        default:
                throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format (stream))));
        }

        return audio;
}

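/** @return sample format of the data in a given audio stream */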
AVSampleFormat
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
{
        return stream->stream (_format_context)->codec->sample_fmt;
}

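/** @return number of bytes per sample (in one channel) of a given audio stream */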
int
FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
{
        return av_get_bytes_per_sample (audio_sample_format (stream));
}

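/** Seek the content to a given time.
 *  @param time Time to seek to.
 *  @param accurate true to use a pre-roll before the requested time so that
 *  frames at exactly `time' can be delivered; false if it is acceptable to
 *  start decoding wherever the container's seek lands us.
 */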
void
FFmpegDecoder::seek (ContentTime time, bool accurate)
{
        Decoder::seek (time, accurate);

        /* If we are doing an `accurate' seek, we need to use pre-roll, as
           we don't really know what the seek will give us.
        */

        ContentTime pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
        time -= pre_roll;

        /* XXX: it seems debatable whether PTS should be used here...
           http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
        */

        optional<int> stream;

        if (_video_stream) {
                stream = _video_stream;
        } else {
                shared_ptr<FFmpegAudioStream> s = dynamic_pointer_cast<FFmpegAudioStream> (_ffmpeg_content->audio->stream ());
                if (s) {
                        stream = s->index (_format_context);
                }
        }

        DCPOMATIC_ASSERT (stream);

        ContentTime u = time - _pts_offset;
        if (u < ContentTime ()) {
                u = ContentTime ();
        }
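        /* av_seek_frame takes a target in the chosen stream's time base, so
           convert seconds accordingly.  AVSEEK_FLAG_BACKWARD means seek to the
           nearest point (normally a keyframe) at or before the target, so that
           decoding can run forward from there to the frame we actually want.
        */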
        av_seek_frame (
                _format_context,
                stream.get(),
                u.seconds() / av_q2d (_format_context->streams[stream.get()]->time_base),
                AVSEEK_FLAG_BACKWARD
                );

        {
                /* Force re-creation of filter graphs to reset them and hence to make sure
                   they don't have any pre-seek frames knocking about.
                */
                boost::mutex::scoped_lock lm (_filter_graphs_mutex);
                _filter_graphs.clear ();
        }

        if (video_codec_context ()) {
                avcodec_flush_buffers (video_codec_context());
        }

        BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, ffmpeg_content()->ffmpeg_audio_streams()) {
                avcodec_flush_buffers (i->stream(_format_context)->codec);
        }

        if (subtitle_codec_context ()) {
                avcodec_flush_buffers (subtitle_codec_context ());
        }

        _have_current_subtitle = false;

        BOOST_FOREACH (optional<ContentTime>& i, _next_time) {
                i = optional<ContentTime>();
        }
}

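/** Decode the audio packet currently in _packet, emitting any complete audio that results. */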
void
FFmpegDecoder::decode_audio_packet ()
{
        /* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4
           several times.
        */

        AVPacket copy_packet = _packet;
        int const stream_index = copy_packet.stream_index;

        /* XXX: inefficient */
        vector<shared_ptr<FFmpegAudioStream> > streams = ffmpeg_content()->ffmpeg_audio_streams ();
        vector<shared_ptr<FFmpegAudioStream> >::const_iterator stream = streams.begin ();
        while (stream != streams.end () && !(*stream)->uses_index (_format_context, stream_index)) {
                ++stream;
        }

        if (stream == streams.end ()) {
                /* The packet's stream may not be an audio one; just ignore it in this method if so */
                return;
        }

        while (copy_packet.size > 0) {

                int frame_finished;
                int decode_result = avcodec_decode_audio4 ((*stream)->stream (_format_context)->codec, _frame, &frame_finished, &copy_packet);
                if (decode_result < 0) {
                        /* avcodec_decode_audio4 can sometimes return an error even though it has decoded
                           some valid data; for example dca_subframe_footer can return AVERROR_INVALIDDATA
                           if it overreads the auxiliary data.  ffplay carries on if frame_finished is true,
                           even in the face of such an error, so I think we should too.

                           Returning from the method here caused mantis #352.
                        */
                        LOG_WARNING ("avcodec_decode_audio4 failed (%1)", decode_result);

                        /* Fudge decode_result so that we come out of the while loop when
                           we've processed this data.
                        */
                        decode_result = copy_packet.size;
                }

                if (frame_finished) {
                        shared_ptr<AudioBuffers> data = deinterleave_audio (*stream);

                        ContentTime ct;
                        if (_frame->pts == AV_NOPTS_VALUE) {
                                /* In some streams we see not every frame coming through with a timestamp; for those
                                   that have AV_NOPTS_VALUE we need to work out the timestamp ourselves.  This is
                                   particularly noticeable with TrueHD streams (see #1111).
                                */
                                if (_next_time[stream_index]) {
                                        ct = *_next_time[stream_index];
                                }
                        } else {
                                ct = ContentTime::from_seconds (
                                        av_frame_get_best_effort_timestamp (_frame) *
                                        av_q2d ((*stream)->stream (_format_context)->time_base))
                                        + _pts_offset;
                        }

                        _next_time[stream_index] = ct + ContentTime::from_frames(data->frames(), (*stream)->frame_rate());

                        if (ct < ContentTime ()) {
                                /* Discard audio data that comes before time 0 */
                                Frame const remove = min (int64_t (data->frames()), (-ct).frames_ceil(double((*stream)->frame_rate ())));
                                data->move (data->frames() - remove, remove, 0);
                                data->set_frames (data->frames() - remove);
                                ct += ContentTime::from_frames (remove, (*stream)->frame_rate ());
                        }

                        if (ct < ContentTime()) {
                                LOG_WARNING (
                                        "Crazy timestamp %1 for %2 samples in stream %3 packet pts %4 (ts=%5 tb=%6, off=%7)",
                                        to_string(ct),
                                        data->frames(),
                                        copy_packet.stream_index,
                                        copy_packet.pts,
                                        av_frame_get_best_effort_timestamp(_frame),
                                        av_q2d((*stream)->stream(_format_context)->time_base),
                                        to_string(_pts_offset)
                                        );
                        }

                        /* Give this data provided there is some, and its time is sane */
                        if (ct >= ContentTime() && data->frames() > 0) {
                                audio->emit (film(), *stream, data, ct);
                        }
                }

                copy_packet.data += decode_result;
                copy_packet.size -= decode_result;
        }
}

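/** Decode the video packet currently in _packet.
 *  @return true if a frame was decoded, false if not (so that flush() knows
 *  when the decoder has given up all its buffered frames).
 */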
bool
FFmpegDecoder::decode_video_packet ()
{
        DCPOMATIC_ASSERT (_video_stream);

        int frame_finished;
        if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
                return false;
        }

        boost::mutex::scoped_lock lm (_filter_graphs_mutex);

        shared_ptr<VideoFilterGraph> graph;

        list<shared_ptr<VideoFilterGraph> >::iterator i = _filter_graphs.begin();
        while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
                ++i;
        }

        if (i == _filter_graphs.end ()) {
                dcp::Fraction vfr (lrint(_ffmpeg_content->video_frame_rate().get() * 1000), 1000);
                graph.reset (new VideoFilterGraph (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format, vfr));
                graph->setup (_ffmpeg_content->filters ());
                _filter_graphs.push_back (graph);
                LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
        } else {
                graph = *i;
        }

        list<pair<shared_ptr<Image>, int64_t> > images = graph->process (_frame);

        for (list<pair<shared_ptr<Image>, int64_t> >::iterator i = images.begin(); i != images.end(); ++i) {

                shared_ptr<Image> image = i->first;

                if (i->second != AV_NOPTS_VALUE) {
                        double const pts = i->second * av_q2d (_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds ();

                        video->emit (
                                film(),
                                shared_ptr<ImageProxy> (new RawImageProxy (image)),
                                llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
                                );
                } else {
                        LOG_WARNING_NC ("Dropping frame without PTS");
                }
        }

        return true;
}

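/** Decode the subtitle packet currently in _packet, emitting any resulting
 *  subtitles along with stops for any that have finished.
 */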
void
FFmpegDecoder::decode_subtitle_packet ()
{
        int got_subtitle;
        AVSubtitle sub;
        if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, &_packet) < 0 || !got_subtitle) {
                return;
        }

        /* Stop any current subtitle, either at the time it was supposed to stop, or now if now is sooner */
        if (_have_current_subtitle) {
                if (_current_subtitle_to) {
                        only_text()->emit_stop (min(*_current_subtitle_to, subtitle_period(sub).from + _pts_offset));
                } else {
                        only_text()->emit_stop (subtitle_period(sub).from + _pts_offset);
                }
                _have_current_subtitle = false;
        }

        if (sub.num_rects == 0) {
                /* Nothing new in this subtitle */
                avsubtitle_free (&sub);
                return;
        }

        /* Subtitle PTS (within the source, not taking into account any of the
           source that we may have chopped off for the DCP).
        */
        FFmpegSubtitlePeriod sub_period = subtitle_period (sub);
        ContentTime const from = sub_period.from + _pts_offset;
        if (sub_period.to) {
                /* We already know when this subtitle stops */
                _current_subtitle_to = *sub_period.to + _pts_offset;
        } else {
                /* We don't know when it stops, so we'll need to stop it when the next one arrives */
                _current_subtitle_to = optional<ContentTime>();
                _have_current_subtitle = true;
        }

        for (unsigned int i = 0; i < sub.num_rects; ++i) {
                AVSubtitleRect const * rect = sub.rects[i];

                switch (rect->type) {
                case SUBTITLE_NONE:
                        break;
                case SUBTITLE_BITMAP:
                        decode_bitmap_subtitle (rect, from);
                        break;
                case SUBTITLE_TEXT:
                        cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
                        break;
                case SUBTITLE_ASS:
                        decode_ass_subtitle (rect->ass, from);
                        break;
                }
        }

        if (_current_subtitle_to) {
                only_text()->emit_stop (*_current_subtitle_to);
        }

        avsubtitle_free (&sub);
}

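/** Emit a bitmap subtitle, mapping its palette through the colour settings from
 *  the FFmpegSubtitleStream and expressing its position as a proportion of the frame.
 *  @param rect Subtitle rectangle from FFmpeg.
 *  @param from Time at which the subtitle should start to be shown.
 */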
void
FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime from)
{
        /* Note BGRA is expressed little-endian, so the first byte in the word is B, second
           G, third R, fourth A.
        */
        shared_ptr<Image> image (new Image (AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), true));

#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
        /* Start of the first line in the subtitle */
        uint8_t* sub_p = rect->pict.data[0];
        /* sub_p looks up into a BGRA palette which is at rect->pict.data[1]
           (i.e. first byte B, second G, third R, fourth A)
        */
        uint8_t const * palette = rect->pict.data[1];
#else
        /* Start of the first line in the subtitle */
        uint8_t* sub_p = rect->data[0];
        /* sub_p looks up into a BGRA palette which is at rect->data[1]
           (first byte B, second G, third R, fourth A)
        */
        uint8_t const * palette = rect->data[1];
#endif
        /* And the stream has a map of those palette colours to colours
           chosen by the user; create a `mapped' palette from those settings.
        */
        map<RGBA, RGBA> colour_map = ffmpeg_content()->subtitle_stream()->colours ();
        vector<RGBA> mapped_palette (rect->nb_colors);
        for (int i = 0; i < rect->nb_colors; ++i) {
                RGBA c (palette[2], palette[1], palette[0], palette[3]);
                map<RGBA, RGBA>::const_iterator j = colour_map.find (c);
                if (j != colour_map.end ()) {
                        mapped_palette[i] = j->second;
                } else {
                        /* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
                           it is from a project that was created before this stuff was added.  Just use the
                           colour straight from the original palette.
                        */
                        mapped_palette[i] = c;
                }
                palette += 4;
        }

        /* Start of the output data */
        uint8_t* out_p = image->data()[0];

        for (int y = 0; y < rect->h; ++y) {
                uint8_t* sub_line_p = sub_p;
                uint8_t* out_line_p = out_p;
                for (int x = 0; x < rect->w; ++x) {
                        RGBA const p = mapped_palette[*sub_line_p++];
                        *out_line_p++ = p.b;
                        *out_line_p++ = p.g;
                        *out_line_p++ = p.r;
                        *out_line_p++ = p.a;
                }
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
                sub_p += rect->pict.linesize[0];
#else
                sub_p += rect->linesize[0];
#endif
                out_p += image->stride()[0];
        }

        int target_width = subtitle_codec_context()->width;
        if (target_width == 0 && video_codec_context()) {
                /* subtitle_codec_context()->width == 0 has been seen in the wild but I don't
                   know if it's supposed to mean something from FFmpeg's point of view.
                */
                target_width = video_codec_context()->width;
        }
        int target_height = subtitle_codec_context()->height;
        if (target_height == 0 && video_codec_context()) {
                target_height = video_codec_context()->height;
        }
        DCPOMATIC_ASSERT (target_width);
        DCPOMATIC_ASSERT (target_height);
        dcpomatic::Rect<double> const scaled_rect (
                static_cast<double> (rect->x) / target_width,
                static_cast<double> (rect->y) / target_height,
                static_cast<double> (rect->w) / target_width,
                static_cast<double> (rect->h) / target_height
                );

        only_text()->emit_bitmap_start (from, image, scaled_rect);
}

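/** Decode an ASS subtitle and emit plain-text subtitles for it.
 *  @param ass Content of an ASS Dialogue: line, assumed to be in the single
 *  format described in the comment below.
 *  @param from Time at which the subtitle should start to be shown.
 */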
void
FFmpegDecoder::decode_ass_subtitle (string ass, ContentTime from)
{
        /* We have no styles and no Format: line, so I'm assuming that FFmpeg
           produces a single format of Dialogue: lines...
        */

        /* The text itself is the tenth comma-separated field, so skip over the
           first nine commas; any comma after that is part of the text.
        */
        int commas = 0;
        string text;
        for (size_t i = 0; i < ass.length(); ++i) {
                if (commas < 9 && ass[i] == ',') {
                        ++commas;
                } else if (commas == 9) {
                        text += ass[i];
                }
        }

        if (text.empty ()) {
                return;
        }

        sub::RawSubtitle base;
        list<sub::RawSubtitle> raw = sub::SSAReader::parse_line (
                base,
                text,
                _ffmpeg_content->video->size().width,
                _ffmpeg_content->video->size().height
                );

        BOOST_FOREACH (sub::Subtitle const & i, sub::collect<list<sub::Subtitle> > (raw)) {
                only_text()->emit_plain_start (from, i);
        }
}