Fix assertion failure with overlapping video content (#666).
[dcpomatic.git] / src / lib / player.cc
/*
    Copyright (C) 2013-2015 Carl Hetherington <cth@carlh.net>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#include "player.h"
#include "film.h"
#include "ffmpeg_decoder.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "image_decoder.h"
#include "image_content.h"
#include "sndfile_decoder.h"
#include "sndfile_content.h"
#include "subtitle_content.h"
#include "subrip_decoder.h"
#include "subrip_content.h"
#include "dcp_content.h"
#include "job.h"
#include "image.h"
#include "raw_image_proxy.h"
#include "ratio.h"
#include "log.h"
#include "render_subtitles.h"
#include "config.h"
#include "content_video.h"
#include "player_video.h"
#include "frame_rate_change.h"
#include "dcp_decoder.h"
#include "dcp_subtitle_content.h"
#include "dcp_subtitle_decoder.h"
#include "audio_processor.h"
#include "playlist.h"
#include <boost/foreach.hpp>
#include <stdint.h>
#include <algorithm>

#include "i18n.h"

#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), Log::TYPE_GENERAL);

using std::list;
using std::cout;
using std::min;
using std::max;
using std::vector;
using std::pair;
using std::map;
using std::make_pair;
using std::copy;
using boost::shared_ptr;
using boost::weak_ptr;
using boost::dynamic_pointer_cast;
using boost::optional;

Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
        : _film (film)
        , _playlist (playlist)
        , _have_valid_pieces (false)
        , _ignore_video (false)
        , _always_burn_subtitles (false)
{
        _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
        _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
        _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
        set_video_container_size (_film->frame_size ());

        film_changed (Film::AUDIO_PROCESSOR);
}

void
Player::setup_pieces ()
{
        list<shared_ptr<Piece> > old_pieces = _pieces;
        _pieces.clear ();

        BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

                if (!i->paths_valid ()) {
                        continue;
                }

                shared_ptr<Decoder> decoder;
                optional<FrameRateChange> frc;

                /* Work out a FrameRateChange for the best overlap video for this content, in case we need it below */
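                /* Illustrative figures only: if this content runs from 0s to 10s and some
                   video content runs from 6s to 20s, their overlap is
                   min (20, 10) - max (6, 0) = 4s; the video content with the largest such
                   overlap supplies the frame rate used for the FrameRateChange below.
                */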
                DCPTime best_overlap_t;
                shared_ptr<VideoContent> best_overlap;
                BOOST_FOREACH (shared_ptr<Content> j, _playlist->content ()) {
                        shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> (j);
                        if (!vc) {
                                continue;
                        }

                        DCPTime const overlap = min (vc->end(), i->end()) - max (vc->position(), i->position());
                        if (overlap > best_overlap_t) {
                                best_overlap = vc;
                                best_overlap_t = overlap;
                        }
                }

                optional<FrameRateChange> best_overlap_frc;
                if (best_overlap) {
                        best_overlap_frc = FrameRateChange (best_overlap->video_frame_rate(), _film->video_frame_rate ());
                } else {
                        /* No video overlap; e.g. if the DCP is just audio */
                        best_overlap_frc = FrameRateChange (_film->video_frame_rate(), _film->video_frame_rate ());
                }

                /* FFmpeg */
                shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (i);
                if (fc) {
                        decoder.reset (new FFmpegDecoder (fc, _film->log()));
                        frc = FrameRateChange (fc->video_frame_rate(), _film->video_frame_rate());
                }

                shared_ptr<const DCPContent> dc = dynamic_pointer_cast<const DCPContent> (i);
                if (dc) {
                        decoder.reset (new DCPDecoder (dc));
                        frc = FrameRateChange (dc->video_frame_rate(), _film->video_frame_rate());
                }

                /* ImageContent */
                shared_ptr<const ImageContent> ic = dynamic_pointer_cast<const ImageContent> (i);
                if (ic) {
                        /* See if we can re-use an old ImageDecoder */
                        for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
                                shared_ptr<ImageDecoder> imd = dynamic_pointer_cast<ImageDecoder> ((*j)->decoder);
                                if (imd && imd->content() == ic) {
                                        decoder = imd;
                                }
                        }

                        if (!decoder) {
                                decoder.reset (new ImageDecoder (ic));
                        }

                        frc = FrameRateChange (ic->video_frame_rate(), _film->video_frame_rate());
                }

                /* SndfileContent */
                shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (i);
                if (sc) {
                        decoder.reset (new SndfileDecoder (sc));
                        frc = best_overlap_frc;
                }

                /* SubRipContent */
                shared_ptr<const SubRipContent> rc = dynamic_pointer_cast<const SubRipContent> (i);
                if (rc) {
                        decoder.reset (new SubRipDecoder (rc));
                        frc = best_overlap_frc;
                }

                /* DCPSubtitleContent */
                shared_ptr<const DCPSubtitleContent> dsc = dynamic_pointer_cast<const DCPSubtitleContent> (i);
                if (dsc) {
                        decoder.reset (new DCPSubtitleDecoder (dsc));
                        frc = best_overlap_frc;
                }

                shared_ptr<VideoDecoder> vd = dynamic_pointer_cast<VideoDecoder> (decoder);
                if (vd && _ignore_video) {
                        vd->set_ignore_video ();
                }

                _pieces.push_back (shared_ptr<Piece> (new Piece (i, decoder, frc.get ())));
        }

        _have_valid_pieces = true;
}

void
Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
{
        shared_ptr<Content> c = w.lock ();
        if (!c) {
                return;
        }

        if (
                property == ContentProperty::POSITION ||
                property == ContentProperty::LENGTH ||
                property == ContentProperty::TRIM_START ||
                property == ContentProperty::TRIM_END ||
                property == ContentProperty::PATH ||
                property == VideoContentProperty::VIDEO_FRAME_TYPE ||
                property == DCPContentProperty::CAN_BE_PLAYED
                ) {

                _have_valid_pieces = false;
                Changed (frequent);

        } else if (
                property == SubtitleContentProperty::USE_SUBTITLES ||
                property == SubtitleContentProperty::SUBTITLE_X_OFFSET ||
                property == SubtitleContentProperty::SUBTITLE_Y_OFFSET ||
                property == SubtitleContentProperty::SUBTITLE_X_SCALE ||
                property == SubtitleContentProperty::SUBTITLE_Y_SCALE ||
                property == VideoContentProperty::VIDEO_CROP ||
                property == VideoContentProperty::VIDEO_SCALE ||
                property == VideoContentProperty::VIDEO_FRAME_RATE ||
                property == VideoContentProperty::VIDEO_FADE_IN ||
                property == VideoContentProperty::VIDEO_FADE_OUT
                ) {

                Changed (frequent);
        }
}

void
Player::set_video_container_size (dcp::Size s)
{
        _video_container_size = s;

        _black_image.reset (new Image (PIX_FMT_RGB24, _video_container_size, true));
        _black_image->make_black ();
}

void
Player::playlist_changed ()
{
        _have_valid_pieces = false;
        Changed (false);
}

void
Player::film_changed (Film::Property p)
{
        /* Here we should notice Film properties that affect our output, and
           alert listeners that our output now would be different to how it was
           last time we were run.
        */

        if (p == Film::CONTAINER || p == Film::VIDEO_FRAME_RATE) {
                Changed (false);
        } else if (p == Film::AUDIO_PROCESSOR) {
                if (_film->audio_processor ()) {
                        _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
                }
        }
}

list<PositionImage>
Player::transform_image_subtitles (list<ImageSubtitle> subs) const
{
        list<PositionImage> all;

        for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
                if (!i->image) {
                        continue;
                }

                /* We will scale the subtitle up to fit _video_container_size */
                dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);

                /* Then we need a corrective translation, consisting of two parts:
                 *
                 * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
                 *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
                 *
                 * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
                 *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
                 *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
                 *
                 * Combining these two translations gives these expressions.
                 */
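                /* As a rough worked example: with a 1998x1080 container and a subtitle
                   rectangle of x = 0.1, y = 0.8, width = 0.5, height = 0.1, the image is
                   scaled to 999x108 and positioned at (200, 864) within the container.
                */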

                all.push_back (
                        PositionImage (
                                i->image->scale (
                                        scaled_size,
                                        dcp::YUV_TO_RGB_REC601,
                                        i->image->pixel_format (),
                                        true
                                        ),
                                Position<int> (
                                        rint (_video_container_size.width * i->rectangle.x),
                                        rint (_video_container_size.height * i->rectangle.y)
                                        )
                                )
                        );
        }

        return all;
}

shared_ptr<PlayerVideo>
Player::black_player_video_frame (DCPTime time) const
{
        return shared_ptr<PlayerVideo> (
                new PlayerVideo (
                        shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
                        time,
                        Crop (),
                        optional<double> (),
                        _video_container_size,
                        _video_container_size,
                        EYES_BOTH,
                        PART_WHOLE,
                        PresetColourConversion::all().front().conversion
                )
        );
}

/** @return All PlayerVideos at the given time (there may be two frames for 3D) */
list<shared_ptr<PlayerVideo> >
Player::get_video (DCPTime time, bool accurate)
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        /* Find subtitles for possible burn-in */

        PlayerSubtitles ps = get_subtitles (time, DCPTime::from_frames (1, _film->video_frame_rate ()), false, true);

        list<PositionImage> sub_images;

        /* Image subtitles */
        list<PositionImage> c = transform_image_subtitles (ps.image);
        copy (c.begin(), c.end(), back_inserter (sub_images));

        /* Text subtitles (rendered to an image) */
        if (!ps.text.empty ()) {
                list<PositionImage> s = render_subtitles (ps.text, _video_container_size);
                copy (s.begin (), s.end (), back_inserter (sub_images));
        }

        optional<PositionImage> subtitles;
        if (!sub_images.empty ()) {
                subtitles = merge (sub_images);
        }

        /* Find pieces containing video which is happening now */

        list<shared_ptr<Piece> > ov = overlaps<VideoContent> (
                time,
                time + DCPTime::from_frames (1, _film->video_frame_rate ()) - DCPTime::delta()
                );

        list<shared_ptr<PlayerVideo> > pvf;

        if (ov.empty ()) {
                /* No video content at this time */
                pvf.push_back (black_player_video_frame (time));
        } else {
                /* Some video content at this time */
                shared_ptr<Piece> last = *(ov.rbegin ());
                VideoFrameType const last_type = dynamic_pointer_cast<VideoContent> (last->content)->video_frame_type ();

                /* Get video from appropriate piece(s) */
                BOOST_FOREACH (shared_ptr<Piece> piece, ov) {

                        shared_ptr<VideoDecoder> decoder = dynamic_pointer_cast<VideoDecoder> (piece->decoder);
                        DCPOMATIC_ASSERT (decoder);
                        shared_ptr<VideoContent> video_content = dynamic_pointer_cast<VideoContent> (piece->content);
                        DCPOMATIC_ASSERT (video_content);

                        bool const use =
                                /* always use the last video */
                                piece == last ||
                                /* with a corresponding L/R eye if appropriate */
                                (last_type == VIDEO_FRAME_TYPE_3D_LEFT && video_content->video_frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) ||
                                (last_type == VIDEO_FRAME_TYPE_3D_RIGHT && video_content->video_frame_type() == VIDEO_FRAME_TYPE_3D_LEFT);
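                        /* For example, if the last piece is the right eye of a 3D pair, the
                           overlapping left-eye piece is used as well so that both eyes are
                           produced; any other overlapping video is decoded but discarded below.
                        */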

                        if (use) {
                                /* We want to use this piece */
                                list<ContentVideo> content_video = decoder->get_video (dcp_to_content_video (piece, time), accurate);
                                if (content_video.empty ()) {
                                        pvf.push_back (black_player_video_frame (time));
                                } else {
                                        dcp::Size image_size = video_content->scale().size (video_content, _video_container_size, _film->frame_size ());

                                        for (list<ContentVideo>::const_iterator i = content_video.begin(); i != content_video.end(); ++i) {
                                                pvf.push_back (
                                                        shared_ptr<PlayerVideo> (
                                                                new PlayerVideo (
                                                                        i->image,
                                                                        content_video_to_dcp (piece, i->frame),
                                                                        video_content->crop (),
                                                                        video_content->fade (i->frame),
                                                                        image_size,
                                                                        _video_container_size,
                                                                        i->eyes,
                                                                        i->part,
                                                                        video_content->colour_conversion ()
                                                                        )
                                                                )
                                                        );
                                        }
                                }
                        } else {
                                /* Discard unused video */
                                decoder->get_video (dcp_to_content_video (piece, time), accurate);
                        }
                }
        }

        if (subtitles) {
                BOOST_FOREACH (shared_ptr<PlayerVideo> p, pvf) {
                        p->set_subtitle (subtitles.get ());
                }
        }

        return pvf;
}

shared_ptr<AudioBuffers>
Player::get_audio (DCPTime time, DCPTime length, bool accurate)
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        Frame const length_frames = length.frames_round (_film->audio_frame_rate ());

        shared_ptr<AudioBuffers> audio (new AudioBuffers (_film->audio_channels(), length_frames));
        audio->make_silent ();

        list<shared_ptr<Piece> > ov = overlaps<AudioContent> (time, time + length);
        if (ov.empty ()) {
                return audio;
        }

        for (list<shared_ptr<Piece> >::iterator i = ov.begin(); i != ov.end(); ++i) {

                shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> ((*i)->content);
                DCPOMATIC_ASSERT (content);
                shared_ptr<AudioDecoder> decoder = dynamic_pointer_cast<AudioDecoder> ((*i)->decoder);
                DCPOMATIC_ASSERT (decoder);

                /* The time that we should request from the content */
                DCPTime request = time - DCPTime::from_seconds (content->audio_delay() / 1000.0);
                Frame request_frames = length_frames;
                DCPTime offset;
                if (request < DCPTime ()) {
                        /* We went off the start of the content, so we will need to offset
                           the stuff we get back.
                        */
                        offset = -request;
                        request_frames += request.frames_round (_film->audio_frame_rate ());
                        if (request_frames < 0) {
                                request_frames = 0;
                        }
                        request = DCPTime ();
                }
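                /* At this point, given (say) an audio delay of 200ms and a request starting
                   at time 0, request has been clamped back to 0, request_frames has been
                   reduced by 0.2s' worth of frames and offset is 0.2s, so the audio that
                   comes back is placed 0.2s later in the output buffer.
                */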

                Frame const content_frame = dcp_to_resampled_audio (*i, request);

                BOOST_FOREACH (AudioStreamPtr j, content->audio_streams ()) {

                        if (j->channels() == 0) {
                                /* Some content (e.g. DCPs) can have streams with no channels */
                                continue;
                        }

                        /* Audio from this piece's decoder stream (which might be more or less than what we asked for) */
                        ContentAudio all = decoder->get_audio (j, content_frame, request_frames, accurate);

                        /* Gain */
                        if (content->audio_gain() != 0) {
                                shared_ptr<AudioBuffers> gain (new AudioBuffers (all.audio));
                                gain->apply_gain (content->audio_gain ());
                                all.audio = gain;
                        }

                        /* Remap channels */
                        shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), all.audio->frames()));
                        dcp_mapped->make_silent ();
                        AudioMapping map = j->mapping ();
                        for (int i = 0; i < map.input_channels(); ++i) {
                                for (int j = 0; j < _film->audio_channels(); ++j) {
                                        if (map.get (i, j) > 0) {
                                                dcp_mapped->accumulate_channel (
                                                        all.audio.get(),
                                                        i,
                                                        j,
                                                        map.get (i, j)
                                                        );
                                        }
                                }
                        }
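                        /* For example, a stereo stream whose mapping sends input channel 0
                           to DCP channel 0 and input channel 1 to DCP channel 1, each at
                           gain 1.0, ends up in the first two channels of dcp_mapped, with
                           the remaining DCP channels left silent.
                        */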

                        if (_audio_processor) {
                                dcp_mapped = _audio_processor->run (dcp_mapped);
                        }

                        all.audio = dcp_mapped;

                        audio->accumulate_frames (
                                all.audio.get(),
                                content_frame - all.frame,
                                offset.frames_round (_film->audio_frame_rate()),
                                min (Frame (all.audio->frames()), request_frames)
                                );
                }
        }

        return audio;
}

Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
        shared_ptr<const VideoContent> vc = dynamic_pointer_cast<const VideoContent> (piece->content);
        DCPTime s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(), s);
        /* We're returning a frame index here so we need to floor() the conversion since we want to know the frame
           that contains t, I think
        */
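        /* e.g. at 24fps a time one third of the way through frame 100 still maps to
           frame 100 rather than rounding up to 101.
        */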
        return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start ()).frames_floor (vc->video_frame_rate ());
}

DCPTime
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
        shared_ptr<const VideoContent> vc = dynamic_pointer_cast<const VideoContent> (piece->content);
        ContentTime const c = ContentTime::from_frames (f, vc->video_frame_rate ()) - piece->content->trim_start ();
        return max (DCPTime (), DCPTime (c, piece->frc) + piece->content->position ());
}

Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
        DCPTime s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(), s);
        /* See notes in dcp_to_content_video */
        return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}

ContentTime
Player::dcp_to_content_subtitle (shared_ptr<const Piece> piece, DCPTime t) const
{
        DCPTime s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(), s);
        return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}

/** @param burnt true to return only subtitles to be burnt, false to return only
 *  subtitles that should not be burnt.  This parameter will be ignored if
 *  _always_burn_subtitles is true; in this case, all subtitles will be returned.
 */
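/* With _always_burn_subtitles false, for example, a call with burnt=true picks up
   only those pieces whose content has use_subtitles() and burn_subtitles() set.
*/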
PlayerSubtitles
Player::get_subtitles (DCPTime time, DCPTime length, bool starting, bool burnt)
{
        list<shared_ptr<Piece> > subs = overlaps<SubtitleContent> (time, time + length);

        PlayerSubtitles ps (time, length);

        for (list<shared_ptr<Piece> >::const_iterator j = subs.begin(); j != subs.end(); ++j) {
                shared_ptr<SubtitleContent> subtitle_content = dynamic_pointer_cast<SubtitleContent> ((*j)->content);
                if (!subtitle_content->use_subtitles () || (!_always_burn_subtitles && (burnt != subtitle_content->burn_subtitles ()))) {
                        continue;
                }

                shared_ptr<SubtitleDecoder> subtitle_decoder = dynamic_pointer_cast<SubtitleDecoder> ((*j)->decoder);
                ContentTime const from = dcp_to_content_subtitle (*j, time);
                /* XXX: this video_frame_rate() should be the rate that the subtitle content has been prepared for */
                ContentTime const to = from + ContentTime::from_frames (1, _film->video_frame_rate ());

                list<ContentImageSubtitle> image = subtitle_decoder->get_image_subtitles (ContentTimePeriod (from, to), starting);
                for (list<ContentImageSubtitle>::iterator i = image.begin(); i != image.end(); ++i) {

                        /* Apply content's subtitle offsets */
                        i->sub.rectangle.x += subtitle_content->subtitle_x_offset ();
                        i->sub.rectangle.y += subtitle_content->subtitle_y_offset ();

                        /* Apply content's subtitle scale */
                        i->sub.rectangle.width *= subtitle_content->subtitle_x_scale ();
                        i->sub.rectangle.height *= subtitle_content->subtitle_y_scale ();

                        /* Apply a corrective translation to keep the subtitle centred after that scale */
                        i->sub.rectangle.x -= i->sub.rectangle.width * (subtitle_content->subtitle_x_scale() - 1);
                        i->sub.rectangle.y -= i->sub.rectangle.height * (subtitle_content->subtitle_y_scale() - 1);

                        ps.image.push_back (i->sub);
                }

                list<ContentTextSubtitle> text = subtitle_decoder->get_text_subtitles (ContentTimePeriod (from, to), starting);
                BOOST_FOREACH (ContentTextSubtitle& ts, text) {
                        BOOST_FOREACH (dcp::SubtitleString& s, ts.subs) {
                                s.set_h_position (s.h_position() + subtitle_content->subtitle_x_offset ());
                                s.set_v_position (s.v_position() + subtitle_content->subtitle_y_offset ());
                                float const xs = subtitle_content->subtitle_x_scale();
                                float const ys = subtitle_content->subtitle_y_scale();
                                float const average = s.size() * (xs + ys) / 2;
                                s.set_size (average);
                                if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
                                        s.set_aspect_adjust (xs / ys);
                                }
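                                /* e.g. with an x scale of 1.2 and a y scale of 1.0 the size
                                   becomes 1.1 times the original and the aspect adjustment
                                   is set to 1.2.
                                */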
                                ps.text.push_back (s);
                        }
                }
        }

        return ps;
}

list<shared_ptr<Font> >
Player::get_subtitle_fonts ()
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        list<shared_ptr<Font> > fonts;
        BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
                shared_ptr<SubtitleContent> sc = dynamic_pointer_cast<SubtitleContent> (p->content);
                if (sc) {
                        /* XXX: things may go wrong if there are duplicate font IDs
                           with different font files.
                        */
                        list<shared_ptr<Font> > f = sc->fonts ();
                        copy (f.begin(), f.end(), back_inserter (fonts));
                }
        }

        return fonts;
}

/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
        _ignore_video = true;
}

/** Set whether or not this player should always burn text subtitles into the image,
 *  regardless of the content settings.
 *  @param burn true to always burn subtitles, false to obey content settings.
 */
void
Player::set_always_burn_subtitles (bool burn)
{
        _always_burn_subtitles = burn;
}