/* src/lib/player.cc (dcpomatic) */
/*
    Copyright (C) 2013-2015 Carl Hetherington <cth@carlh.net>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#include "player.h"
#include "film.h"
#include "ffmpeg_decoder.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "image_decoder.h"
#include "image_content.h"
#include "sndfile_decoder.h"
#include "sndfile_content.h"
#include "subtitle_content.h"
#include "subrip_decoder.h"
#include "subrip_content.h"
#include "dcp_content.h"
#include "job.h"
#include "image.h"
#include "raw_image_proxy.h"
#include "ratio.h"
#include "log.h"
#include "render_subtitles.h"
#include "config.h"
#include "content_video.h"
#include "player_video.h"
#include "frame_rate_change.h"
#include "dcp_decoder.h"
#include "dcp_subtitle_content.h"
#include "dcp_subtitle_decoder.h"
#include "audio_processor.h"
#include <boost/foreach.hpp>
#include <stdint.h>
#include <algorithm>

#include "i18n.h"

#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), Log::TYPE_GENERAL);

using std::list;
using std::cout;
using std::min;
using std::max;
using std::vector;
using std::pair;
using std::map;
using std::make_pair;
using std::copy;
using boost::shared_ptr;
using boost::weak_ptr;
using boost::dynamic_pointer_cast;
using boost::optional;

Player::Player (shared_ptr<const Film> film)
        : _film (film)
        , _have_valid_pieces (false)
        , _ignore_video (false)
        , _burn_subtitles (film->burn_subtitles ())
{
        _film_content_changed_connection = _film->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
        _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
        set_video_container_size (_film->frame_size ());

        film_changed (Film::AUDIO_PROCESSOR);
}

void
Player::setup_pieces ()
{
        list<shared_ptr<Piece> > old_pieces = _pieces;
        _pieces.clear ();

        ContentList content = _film->content ();

        for (ContentList::iterator i = content.begin(); i != content.end(); ++i) {

                if (!(*i)->paths_valid ()) {
                        continue;
                }

                shared_ptr<Decoder> decoder;
                optional<FrameRateChange> frc;

                /* Work out a FrameRateChange for the best overlap video for this content, in case we need it below */
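                /* Here the 'overlap' between two pieces of content is the length of time for
                   which both are present on the timeline: the earlier of the two end times
                   minus the later of the two start times, which is positive only when they
                   really do coincide.
                */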
                DCPTime best_overlap_t;
                shared_ptr<VideoContent> best_overlap;
                for (ContentList::iterator j = content.begin(); j != content.end(); ++j) {
                        shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> (*j);
                        if (!vc) {
                                continue;
                        }

                        DCPTime const overlap = min (vc->end(), (*i)->end()) - max (vc->position(), (*i)->position());
                        if (overlap > best_overlap_t) {
                                best_overlap = vc;
                                best_overlap_t = overlap;
                        }
                }

                optional<FrameRateChange> best_overlap_frc;
                if (best_overlap) {
                        best_overlap_frc = FrameRateChange (best_overlap->video_frame_rate(), _film->video_frame_rate ());
                } else {
                        /* No video overlap; e.g. if the DCP is just audio */
                        best_overlap_frc = FrameRateChange (_film->video_frame_rate(), _film->video_frame_rate ());
                }

                /* FFmpeg */
                shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
                if (fc) {
                        decoder.reset (new FFmpegDecoder (fc, _film->log()));
                        frc = FrameRateChange (fc->video_frame_rate(), _film->video_frame_rate());
                }

                shared_ptr<const DCPContent> dc = dynamic_pointer_cast<const DCPContent> (*i);
                if (dc) {
                        decoder.reset (new DCPDecoder (dc));
                        frc = FrameRateChange (dc->video_frame_rate(), _film->video_frame_rate());
                }

                /* ImageContent */
                shared_ptr<const ImageContent> ic = dynamic_pointer_cast<const ImageContent> (*i);
                if (ic) {
                        /* See if we can re-use an old ImageDecoder */
                        for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
                                shared_ptr<ImageDecoder> imd = dynamic_pointer_cast<ImageDecoder> ((*j)->decoder);
                                if (imd && imd->content() == ic) {
                                        decoder = imd;
                                }
                        }

                        if (!decoder) {
                                decoder.reset (new ImageDecoder (ic));
                        }

                        frc = FrameRateChange (ic->video_frame_rate(), _film->video_frame_rate());
                }

                /* SndfileContent */
                shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
                if (sc) {
                        decoder.reset (new SndfileDecoder (sc));
                        frc = best_overlap_frc;
                }

                /* SubRipContent */
                shared_ptr<const SubRipContent> rc = dynamic_pointer_cast<const SubRipContent> (*i);
                if (rc) {
                        decoder.reset (new SubRipDecoder (rc));
                        frc = best_overlap_frc;
                }

                /* DCPSubtitleContent */
                shared_ptr<const DCPSubtitleContent> dsc = dynamic_pointer_cast<const DCPSubtitleContent> (*i);
                if (dsc) {
                        decoder.reset (new DCPSubtitleDecoder (dsc));
                        frc = best_overlap_frc;
                }

                shared_ptr<VideoDecoder> vd = dynamic_pointer_cast<VideoDecoder> (decoder);
                if (vd && _ignore_video) {
                        vd->set_ignore_video ();
                }

                _pieces.push_back (shared_ptr<Piece> (new Piece (*i, decoder, frc.get ())));
        }

        _have_valid_pieces = true;
}

void
Player::content_changed (weak_ptr<Content> w, int property, bool frequent)
{
        shared_ptr<Content> c = w.lock ();
        if (!c) {
                return;
        }

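        /* Properties in the first group change how content is laid out on the DCP timeline,
           so the Pieces must be rebuilt; those in the second only change how existing Pieces
           are rendered, so it is enough to tell listeners that our output has changed.
        */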
        if (
                property == ContentProperty::POSITION ||
                property == ContentProperty::LENGTH ||
                property == ContentProperty::TRIM_START ||
                property == ContentProperty::TRIM_END ||
                property == ContentProperty::PATH ||
                property == VideoContentProperty::VIDEO_FRAME_TYPE ||
                property == DCPContentProperty::CAN_BE_PLAYED
                ) {

                _have_valid_pieces = false;
                Changed (frequent);

        } else if (
                property == SubtitleContentProperty::USE_SUBTITLES ||
                property == SubtitleContentProperty::SUBTITLE_X_OFFSET ||
                property == SubtitleContentProperty::SUBTITLE_Y_OFFSET ||
                property == SubtitleContentProperty::SUBTITLE_X_SCALE ||
                property == SubtitleContentProperty::SUBTITLE_Y_SCALE ||
                property == VideoContentProperty::VIDEO_CROP ||
                property == VideoContentProperty::VIDEO_SCALE ||
                property == VideoContentProperty::VIDEO_FRAME_RATE ||
                property == VideoContentProperty::VIDEO_FADE_IN ||
                property == VideoContentProperty::VIDEO_FADE_OUT
                ) {

                Changed (frequent);
        }
}

void
Player::set_video_container_size (dcp::Size s)
{
        _video_container_size = s;

        _black_image.reset (new Image (PIX_FMT_RGB24, _video_container_size, true));
        _black_image->make_black ();
}

void
Player::film_changed (Film::Property p)
{
        /* Here we should notice Film properties that affect our output, and
           alert listeners that our output now would be different to how it was
           last time we were run.
        */

        if (p == Film::CONTENT) {
                _have_valid_pieces = false;
                Changed (false);
        } else if (p == Film::CONTAINER || p == Film::VIDEO_FRAME_RATE) {
                Changed (false);
        } else if (p == Film::AUDIO_PROCESSOR) {
                if (_film->audio_processor ()) {
                        _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
                }
        }
}

list<PositionImage>
Player::transform_image_subtitles (list<ImageSubtitle> subs) const
{
        list<PositionImage> all;

        for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
                if (!i->image) {
                        continue;
                }

                /* We will scale the subtitle up to fit _video_container_size */
                dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);

                /* Then we need a corrective translation, consisting of two parts:
                 *
                 * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
                 *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
                 *
                 * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
                 *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
                 *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
                 *
                 * Combining these two translations gives these expressions.
                 */
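                /* For example, with a 1998-pixel-wide container and rectangle.x of 0.1, part 1
                   gives a horizontal translation of rint (1998 * 0.1) = 200 pixels.
                */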

                all.push_back (
                        PositionImage (
                                i->image->scale (
                                        scaled_size,
                                        dcp::YUV_TO_RGB_REC601,
                                        i->image->pixel_format (),
                                        true
                                        ),
                                Position<int> (
                                        rint (_video_container_size.width * i->rectangle.x),
                                        rint (_video_container_size.height * i->rectangle.y)
                                        )
                                )
                        );
        }

        return all;
}

shared_ptr<PlayerVideo>
Player::black_player_video_frame (DCPTime time) const
{
        return shared_ptr<PlayerVideo> (
                new PlayerVideo (
                        shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
                        time,
                        Crop (),
                        optional<float> (),
                        _video_container_size,
                        _video_container_size,
                        EYES_BOTH,
                        PART_WHOLE,
                        PresetColourConversion::all().front().conversion
                )
        );
}

/** @return All PlayerVideos at the given time (there may be two frames for 3D) */
list<shared_ptr<PlayerVideo> >
Player::get_video (DCPTime time, bool accurate)
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

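        /* Find the pieces whose video is present during this one DCP frame; subtracting
           DCPTime::delta() keeps the start of the following frame out of the period.
        */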
        list<shared_ptr<Piece> > ov = overlaps<VideoContent> (
                time,
                time + DCPTime::from_frames (1, _film->video_frame_rate ()) - DCPTime::delta()
                );

        list<shared_ptr<PlayerVideo> > pvf;

        if (ov.empty ()) {
                /* No video content at this time */
                pvf.push_back (black_player_video_frame (time));
        } else {
                /* Create a PlayerVideo from the content's video at this time */

                shared_ptr<Piece> piece = ov.back ();
                shared_ptr<VideoDecoder> decoder = dynamic_pointer_cast<VideoDecoder> (piece->decoder);
                DCPOMATIC_ASSERT (decoder);
                shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
                DCPOMATIC_ASSERT (content);

                list<ContentVideo> content_video = decoder->get_video (dcp_to_content_video (piece, time), accurate);
                if (content_video.empty ()) {
                        pvf.push_back (black_player_video_frame (time));
                        return pvf;
                }

                dcp::Size image_size = content->scale().size (content, _video_container_size, _film->frame_size ());

                for (list<ContentVideo>::const_iterator i = content_video.begin(); i != content_video.end(); ++i) {
                        pvf.push_back (
                                shared_ptr<PlayerVideo> (
                                        new PlayerVideo (
                                                i->image,
                                                content_video_to_dcp (piece, i->frame),
                                                content->crop (),
                                                content->fade (i->frame),
                                                image_size,
                                                _video_container_size,
                                                i->eyes,
                                                i->part,
                                                content->colour_conversion ()
                                                )
                                        )
                                );
                }
        }

        /* Add subtitles (for possible burn-in) to whatever PlayerVideos we got */

        PlayerSubtitles ps = get_subtitles (time, DCPTime::from_frames (1, _film->video_frame_rate ()), false);

        list<PositionImage> sub_images;

        /* Image subtitles */
        list<PositionImage> c = transform_image_subtitles (ps.image);
        copy (c.begin(), c.end(), back_inserter (sub_images));

        /* Text subtitles (rendered to an image) */
        if (_burn_subtitles && !ps.text.empty ()) {
                list<PositionImage> s = render_subtitles (ps.text, _video_container_size);
                copy (s.begin (), s.end (), back_inserter (sub_images));
        }

        if (!sub_images.empty ()) {
                for (list<shared_ptr<PlayerVideo> >::const_iterator i = pvf.begin(); i != pvf.end(); ++i) {
                        (*i)->set_subtitle (merge (sub_images));
                }
        }

        return pvf;
}

shared_ptr<AudioBuffers>
Player::get_audio (DCPTime time, DCPTime length, bool accurate)
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        Frame const length_frames = length.frames (_film->audio_frame_rate ());

        shared_ptr<AudioBuffers> audio (new AudioBuffers (_film->audio_channels(), length_frames));
        audio->make_silent ();

        list<shared_ptr<Piece> > ov = overlaps<AudioContent> (time, time + length);
        if (ov.empty ()) {
                return audio;
        }

        for (list<shared_ptr<Piece> >::iterator i = ov.begin(); i != ov.end(); ++i) {

                shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> ((*i)->content);
                DCPOMATIC_ASSERT (content);
                shared_ptr<AudioDecoder> decoder = dynamic_pointer_cast<AudioDecoder> ((*i)->decoder);
                DCPOMATIC_ASSERT (decoder);

                /* The time that we should request from the content */
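                /* A positive audio_delay means this content's sound should play later than
                   its video, so the content audio needed for this DCP time is correspondingly
                   earlier.
                */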
                DCPTime request = time - DCPTime::from_seconds (content->audio_delay() / 1000.0);
                Frame request_frames = length_frames;
                DCPTime offset;
                if (request < DCPTime ()) {
                        /* We went off the start of the content, so we will need to offset
                           the stuff we get back.
                        */
                        offset = -request;
                        request_frames += request.frames (_film->audio_frame_rate ());
                        if (request_frames < 0) {
                                request_frames = 0;
                        }
                        request = DCPTime ();
                }

                Frame const content_frame = dcp_to_content_audio (*i, request);

                BOOST_FOREACH (AudioStreamPtr j, content->audio_streams ()) {

                        /* Audio from this piece's decoder stream (which might be more or less than what we asked for) */
                        ContentAudio all = decoder->get_audio (j, content_frame, request_frames, accurate);

                        /* Gain */
                        if (content->audio_gain() != 0) {
                                shared_ptr<AudioBuffers> gain (new AudioBuffers (all.audio));
                                gain->apply_gain (content->audio_gain ());
                                all.audio = gain;
                        }

                        /* Remap channels */
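                        /* The stream's AudioMapping gives the gain from each content channel to
                           each DCP channel; every pair with a positive gain is accumulated into
                           a buffer with the film's channel layout.
                        */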
                        shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), all.audio->frames()));
                        dcp_mapped->make_silent ();
                        AudioMapping map = j->mapping ();
                        for (int i = 0; i < map.input_channels(); ++i) {
                                for (int j = 0; j < _film->audio_channels(); ++j) {
                                        if (map.get (i, j) > 0) {
                                                dcp_mapped->accumulate_channel (
                                                        all.audio.get(),
                                                        i,
                                                        j,
                                                        map.get (i, j)
                                                        );
                                        }
                                }
                        }

                        if (_audio_processor) {
                                dcp_mapped = _audio_processor->run (dcp_mapped);
                        }

                        all.audio = dcp_mapped;

                        audio->accumulate_frames (
                                all.audio.get(),
                                content_frame - all.frame,
                                offset.frames (_film->audio_frame_rate()),
                                min (Frame (all.audio->frames()), request_frames)
                                );
                }
        }

        return audio;
}

Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
        /* s is the offset of t from the start position of this content */
        DCPTime s = t - piece->content->position ();
        s = DCPTime (max (DCPTime::Type (0), s.get ()));
        s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));

        /* Convert this to the content frame */
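        /* frc describes how this content's frames are skipped or repeated to fit the film's
           video frame rate, so dividing by frc.factor() converts a count of DCP frames into
           the corresponding count of content frames.
        */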
        return DCPTime (s + piece->content->trim_start()).frames (_film->video_frame_rate()) / piece->frc.factor ();
}

DCPTime
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
        DCPTime t = DCPTime::from_frames (f * piece->frc.factor (), _film->video_frame_rate()) - piece->content->trim_start () + piece->content->position ();
        if (t < DCPTime ()) {
                t = DCPTime ();
        }

        return t;
}

Frame
Player::dcp_to_content_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
        /* s is the offset of t from the start position of this content */
        DCPTime s = t - piece->content->position ();
        s = DCPTime (max (DCPTime::Type (0), s.get ()));
        s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));

        /* Convert this to the content frame */
        return DCPTime (s + piece->content->trim_start()).frames (_film->audio_frame_rate());
}

ContentTime
Player::dcp_to_content_subtitle (shared_ptr<const Piece> piece, DCPTime t) const
{
        /* s is the offset of t from the start position of this content */
        DCPTime s = t - piece->content->position ();
        s = DCPTime (max (DCPTime::Type (0), s.get ()));
        s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));

        return ContentTime (s + piece->content->trim_start(), piece->frc);
}

void
PlayerStatistics::dump (shared_ptr<Log> log) const
{
        log->log (String::compose ("Video: %1 good %2 skipped %3 black %4 repeat", video.good, video.skip, video.black, video.repeat), Log::TYPE_GENERAL);
        log->log (String::compose ("Audio: %1 good %2 skipped %3 silence", audio.good, audio.skip, audio.silence.seconds()), Log::TYPE_GENERAL);
}

PlayerStatistics const &
Player::statistics () const
{
        return _statistics;
}

PlayerSubtitles
Player::get_subtitles (DCPTime time, DCPTime length, bool starting)
{
        list<shared_ptr<Piece> > subs = overlaps<SubtitleContent> (time, time + length);

        PlayerSubtitles ps (time, length);

        for (list<shared_ptr<Piece> >::const_iterator j = subs.begin(); j != subs.end(); ++j) {
                shared_ptr<SubtitleContent> subtitle_content = dynamic_pointer_cast<SubtitleContent> ((*j)->content);
                if (!subtitle_content->use_subtitles ()) {
                        continue;
                }

                shared_ptr<SubtitleDecoder> subtitle_decoder = dynamic_pointer_cast<SubtitleDecoder> ((*j)->decoder);
                ContentTime const from = dcp_to_content_subtitle (*j, time);
                /* XXX: this video_frame_rate() should be the rate that the subtitle content has been prepared for */
                ContentTime const to = from + ContentTime::from_frames (1, _film->video_frame_rate ());

                list<ContentImageSubtitle> image = subtitle_decoder->get_image_subtitles (ContentTimePeriod (from, to), starting);
                for (list<ContentImageSubtitle>::iterator i = image.begin(); i != image.end(); ++i) {

                        /* Apply content's subtitle offsets */
                        i->sub.rectangle.x += subtitle_content->subtitle_x_offset ();
                        i->sub.rectangle.y += subtitle_content->subtitle_y_offset ();

                        /* Apply content's subtitle scale */
                        i->sub.rectangle.width *= subtitle_content->subtitle_x_scale ();
                        i->sub.rectangle.height *= subtitle_content->subtitle_y_scale ();

                        /* Apply a corrective translation to keep the subtitle centred after that scale */
                        i->sub.rectangle.x -= i->sub.rectangle.width * (subtitle_content->subtitle_x_scale() - 1);
                        i->sub.rectangle.y -= i->sub.rectangle.height * (subtitle_content->subtitle_y_scale() - 1);

                        ps.image.push_back (i->sub);
                }

                list<ContentTextSubtitle> text = subtitle_decoder->get_text_subtitles (ContentTimePeriod (from, to), starting);
                BOOST_FOREACH (ContentTextSubtitle& ts, text) {
                        BOOST_FOREACH (dcp::SubtitleString& s, ts.subs) {
                                s.set_h_position (s.h_position() + subtitle_content->subtitle_x_offset ());
                                s.set_v_position (s.v_position() + subtitle_content->subtitle_y_offset ());
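                                /* dcp::SubtitleString has a single size, so approximate separate
                                   x and y scales by scaling the size by their mean and using
                                   aspect_adjust to express the remaining horizontal stretch.
                                */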
                                float const xs = subtitle_content->subtitle_x_scale();
                                float const ys = subtitle_content->subtitle_y_scale();
                                float const average = s.size() * (xs + ys) / 2;
                                s.set_size (average);
                                if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
                                        s.set_aspect_adjust (xs / ys);
                                }
                                ps.text.push_back (s);
                        }
                }
        }

        return ps;
}

list<shared_ptr<Font> >
Player::get_subtitle_fonts ()
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        list<shared_ptr<Font> > fonts;
        BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
                shared_ptr<SubtitleContent> sc = dynamic_pointer_cast<SubtitleContent> (p->content);
                if (sc) {
                        /* XXX: things may go wrong if there are duplicate font IDs
                           with different font files.
                        */
                        list<shared_ptr<Font> > f = sc->fonts ();
                        copy (f.begin(), f.end(), back_inserter (fonts));
                }
        }

        return fonts;
}

/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
        _ignore_video = true;
}

/** Set whether or not this player should burn text subtitles into the image.
 *  @param burn true to burn subtitles, false to not burn them.
 */
void
Player::set_burn_subtitles (bool burn)
{
        _burn_subtitles = burn;
}