Add language property to SubtitleContent and use it in output DCP subtitle files.
[dcpomatic.git] / src / lib / player.cc
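In this file the change appears to surface in Player::get_subtitles(), which copies the content's subtitle language into the PlayerSubtitles it returns (falling back to "Unknown"), so that it can be carried through to the output DCP subtitle files.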
/*
    Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#include "player.h"
#include "film.h"
#include "ffmpeg_decoder.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "image_decoder.h"
#include "image_content.h"
#include "sndfile_decoder.h"
#include "sndfile_content.h"
#include "subtitle_content.h"
#include "subrip_decoder.h"
#include "subrip_content.h"
#include "dcp_content.h"
#include "playlist.h"
#include "job.h"
#include "image.h"
#include "raw_image_proxy.h"
#include "ratio.h"
#include "log.h"
#include "scaler.h"
#include "render_subtitles.h"
#include "config.h"
#include "content_video.h"
#include "player_video.h"
#include "frame_rate_change.h"
#include "dcp_decoder.h"
#include "dcp_subtitle_content.h"
#include "dcp_subtitle_decoder.h"
#include <boost/foreach.hpp>
#include <stdint.h>
#include <algorithm>
#include <cassert>

#include "i18n.h"

#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), Log::TYPE_GENERAL);

using std::list;
using std::cout;
using std::min;
using std::max;
using std::vector;
using std::pair;
using std::map;
using std::make_pair;
using boost::shared_ptr;
using boost::weak_ptr;
using boost::dynamic_pointer_cast;
using boost::optional;

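/** Construct a Player for a given film and playlist, connecting to their change
 *  signals and setting the video container size from the film's frame size.
 */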
Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
	: _film (f)
	, _playlist (p)
	, _have_valid_pieces (false)
	, _approximate_size (false)
{
	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
	set_video_container_size (_film->frame_size ());
}

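/** Rebuild our list of Pieces: one for each bit of content in the playlist which has
 *  valid paths, together with a suitable decoder and frame rate change.
 */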
void
Player::setup_pieces ()
{
	list<shared_ptr<Piece> > old_pieces = _pieces;
	_pieces.clear ();

	ContentList content = _playlist->content ();

	for (ContentList::iterator i = content.begin(); i != content.end(); ++i) {

		if (!(*i)->paths_valid ()) {
			continue;
		}

		shared_ptr<Decoder> decoder;
		optional<FrameRateChange> frc;

		/* Work out a FrameRateChange for the best overlap video for this content, in case we need it below */
		DCPTime best_overlap_t;
		shared_ptr<VideoContent> best_overlap;
		for (ContentList::iterator j = content.begin(); j != content.end(); ++j) {
			shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> (*j);
			if (!vc) {
				continue;
			}

			DCPTime const overlap = min (vc->end(), (*i)->end()) - max (vc->position(), (*i)->position());
			if (overlap > best_overlap_t) {
				best_overlap = vc;
				best_overlap_t = overlap;
			}
		}

		optional<FrameRateChange> best_overlap_frc;
		if (best_overlap) {
			best_overlap_frc = FrameRateChange (best_overlap->video_frame_rate(), _film->video_frame_rate ());
		} else {
			/* No video overlap; e.g. if the DCP is just audio */
			best_overlap_frc = FrameRateChange (_film->video_frame_rate(), _film->video_frame_rate ());
		}

		/* FFmpeg */
		shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
		if (fc) {
			decoder.reset (new FFmpegDecoder (fc, _film->log()));
			frc = FrameRateChange (fc->video_frame_rate(), _film->video_frame_rate());
		}

		shared_ptr<const DCPContent> dc = dynamic_pointer_cast<const DCPContent> (*i);
		if (dc) {
			decoder.reset (new DCPDecoder (dc));
			frc = FrameRateChange (dc->video_frame_rate(), _film->video_frame_rate());
		}

		/* ImageContent */
		shared_ptr<const ImageContent> ic = dynamic_pointer_cast<const ImageContent> (*i);
		if (ic) {
			/* See if we can re-use an old ImageDecoder */
			for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
				shared_ptr<ImageDecoder> imd = dynamic_pointer_cast<ImageDecoder> ((*j)->decoder);
				if (imd && imd->content() == ic) {
					decoder = imd;
				}
			}

			if (!decoder) {
				decoder.reset (new ImageDecoder (ic));
			}

			frc = FrameRateChange (ic->video_frame_rate(), _film->video_frame_rate());
		}

		/* SndfileContent */
		shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
		if (sc) {
			decoder.reset (new SndfileDecoder (sc));
			frc = best_overlap_frc;
		}

		/* SubRipContent */
		shared_ptr<const SubRipContent> rc = dynamic_pointer_cast<const SubRipContent> (*i);
		if (rc) {
			decoder.reset (new SubRipDecoder (rc));
			frc = best_overlap_frc;
		}

		/* DCPSubtitleContent */
		shared_ptr<const DCPSubtitleContent> dsc = dynamic_pointer_cast<const DCPSubtitleContent> (*i);
		if (dsc) {
			decoder.reset (new DCPSubtitleDecoder (dsc));
			frc = best_overlap_frc;
		}

		_pieces.push_back (shared_ptr<Piece> (new Piece (*i, decoder, frc.get ())));
	}

	_have_valid_pieces = true;
}

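/** Handle a change to a property of some content in the playlist.  Some property changes
 *  mean that our pieces must be rebuilt; all of the ones handled here mean that our output
 *  has changed, so listeners are told via the Changed signal.
 */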
void
Player::content_changed (weak_ptr<Content> w, int property, bool frequent)
{
	shared_ptr<Content> c = w.lock ();
	if (!c) {
		return;
	}

	if (
		property == ContentProperty::POSITION ||
		property == ContentProperty::LENGTH ||
		property == ContentProperty::TRIM_START ||
		property == ContentProperty::TRIM_END ||
		property == ContentProperty::PATH ||
		property == VideoContentProperty::VIDEO_FRAME_TYPE ||
		property == DCPContentProperty::CAN_BE_PLAYED
		) {

		_have_valid_pieces = false;
		Changed (frequent);

	} else if (
		property == SubtitleContentProperty::USE_SUBTITLES ||
		property == SubtitleContentProperty::SUBTITLE_X_OFFSET ||
		property == SubtitleContentProperty::SUBTITLE_Y_OFFSET ||
		property == SubtitleContentProperty::SUBTITLE_X_SCALE ||
		property == SubtitleContentProperty::SUBTITLE_Y_SCALE ||
		property == VideoContentProperty::VIDEO_CROP ||
		property == VideoContentProperty::VIDEO_SCALE ||
		property == VideoContentProperty::VIDEO_FRAME_RATE
		) {

		Changed (frequent);
	}
}

/** Handle a change to the playlist; we will need to rebuild our pieces and our output will change */
void
Player::playlist_changed ()
{
	_have_valid_pieces = false;
	Changed (false);
}

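/** Set the size of the container that video will be scaled into, and make a black frame
 *  of that size for use where there is no video.
 */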
void
Player::set_video_container_size (dcp::Size s)
{
	_video_container_size = s;

	_black_image.reset (new Image (PIX_FMT_RGB24, _video_container_size, true));
	_black_image->make_black ();
}

void
Player::film_changed (Film::Property p)
{
	/* Here we should notice Film properties that affect our output, and
	   alert listeners that our output now would be different to how it was
	   last time we were run.
	*/

	if (p == Film::SCALER || p == Film::CONTAINER || p == Film::VIDEO_FRAME_RATE) {
		Changed (false);
	}
}

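/** Scale and position a list of image subtitles (whose rectangles are expressed as
 *  proportions of the video container) so that they can be placed onto images of
 *  _video_container_size.
 */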
list<PositionImage>
Player::transform_image_subtitles (list<ImageSubtitle> subs) const
{
	list<PositionImage> all;

	for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
		if (!i->image) {
			continue;
		}

		/* We will scale the subtitle up to fit _video_container_size */
		dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);

		/* Then we need a corrective translation, consisting of two parts:
		 *
		 * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
		 *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
		 *
		 * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
		 *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
		 *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
		 *
		 * Combining these two translations gives these expressions.
		 */

		all.push_back (
			PositionImage (
				i->image->scale (
					scaled_size,
					Scaler::from_id ("bicubic"),
					i->image->pixel_format (),
					true
					),
				Position<int> (
					rint (_video_container_size.width * i->rectangle.x),
					rint (_video_container_size.height * i->rectangle.y)
					)
				)
			);
	}

	return all;
}

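/** Ask for approximately-sized images: from now on get_video() will round image sizes
 *  down to a multiple of 4 pixels rather than scaling exactly.
 */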
void
Player::set_approximate_size ()
{
	_approximate_size = true;
}

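/** @return A black frame at the given time, filling the whole video container */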
shared_ptr<PlayerVideo>
Player::black_player_video_frame (DCPTime time) const
{
	return shared_ptr<PlayerVideo> (
		new PlayerVideo (
			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
			time,
			Crop (),
			optional<float> (),
			_video_container_size,
			_video_container_size,
			Scaler::from_id ("bicubic"),
			EYES_BOTH,
			PART_WHOLE,
			Config::instance()->colour_conversions().front().conversion
		)
	);
}

/** @return All PlayerVideos at the given time (there may be two frames for 3D) */
list<shared_ptr<PlayerVideo> >
Player::get_video (DCPTime time, bool accurate)
{
	if (!_have_valid_pieces) {
		setup_pieces ();
	}

	list<shared_ptr<Piece> > ov = overlaps<VideoContent> (
		time,
		time + DCPTime::from_frames (1, _film->video_frame_rate ())
		);

	list<shared_ptr<PlayerVideo> > pvf;

	if (ov.empty ()) {
		/* No video content at this time */
		pvf.push_back (black_player_video_frame (time));
	} else {
		/* Create a PlayerVideo from the content's video at this time */

		shared_ptr<Piece> piece = ov.back ();
		shared_ptr<VideoDecoder> decoder = dynamic_pointer_cast<VideoDecoder> (piece->decoder);
		assert (decoder);
		shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
		assert (content);

		list<ContentVideo> content_video = decoder->get_video (dcp_to_content_video (piece, time), accurate);
		if (content_video.empty ()) {
			pvf.push_back (black_player_video_frame (time));
			return pvf;
		}

		dcp::Size image_size = content->scale().size (content, _video_container_size, _film->frame_size (), _approximate_size ? 4 : 1);
		if (_approximate_size) {
			image_size.width &= ~3;
			image_size.height &= ~3;
		}

		for (list<ContentVideo>::const_iterator i = content_video.begin(); i != content_video.end(); ++i) {
			pvf.push_back (
				shared_ptr<PlayerVideo> (
					new PlayerVideo (
						i->image,
						content_video_to_dcp (piece, i->frame),
						content->crop (),
						content->fade (i->frame),
						image_size,
						_video_container_size,
						_film->scaler(),
						i->eyes,
						i->part,
						content->colour_conversion ()
						)
					)
				);
		}
	}

	/* Add subtitles (for possible burn-in) to whatever PlayerVideos we got */

	PlayerSubtitles ps = get_subtitles (time, DCPTime::from_frames (1, _film->video_frame_rate ()), false);

	list<PositionImage> sub_images;

	/* Image subtitles */
	list<PositionImage> c = transform_image_subtitles (ps.image);
	copy (c.begin(), c.end(), back_inserter (sub_images));

	/* Text subtitles (rendered to images) */
	sub_images.push_back (render_subtitles (ps.text, _video_container_size));

	if (!sub_images.empty ()) {
		for (list<shared_ptr<PlayerVideo> >::const_iterator i = pvf.begin(); i != pvf.end(); ++i) {
			(*i)->set_subtitle (merge (sub_images));
		}
	}

	return pvf;
}

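/** @return Audio for the given period, mixed down from all overlapping audio content,
 *  mapped to the film's channels and padded with silence where necessary.
 */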
shared_ptr<AudioBuffers>
Player::get_audio (DCPTime time, DCPTime length, bool accurate)
{
	if (!_have_valid_pieces) {
		setup_pieces ();
	}

	AudioFrame const length_frames = length.frames (_film->audio_frame_rate ());

	shared_ptr<AudioBuffers> audio (new AudioBuffers (_film->audio_channels(), length_frames));
	audio->make_silent ();

	list<shared_ptr<Piece> > ov = overlaps<AudioContent> (time, time + length);
	if (ov.empty ()) {
		return audio;
	}

	for (list<shared_ptr<Piece> >::iterator i = ov.begin(); i != ov.end(); ++i) {

		shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> ((*i)->content);
		assert (content);
		shared_ptr<AudioDecoder> decoder = dynamic_pointer_cast<AudioDecoder> ((*i)->decoder);
		assert (decoder);

		if (content->audio_frame_rate() == 0) {
			/* This AudioContent has no audio (e.g. if it is an FFmpegContent with no
			 * audio stream).
			 */
			continue;
		}

		/* The time that we should request from the content */
		DCPTime request = time - DCPTime::from_seconds (content->audio_delay() / 1000.0);
		DCPTime offset;
		if (request < DCPTime ()) {
			/* We went off the start of the content, so we will need to offset
			   the stuff we get back.
			*/
			offset = -request;
			request = DCPTime ();
		}

		AudioFrame const content_frame = dcp_to_content_audio (*i, request);

		/* Audio from this piece's decoder (which might be more or less than what we asked for) */
		shared_ptr<ContentAudio> all = decoder->get_audio (content_frame, length_frames, accurate);

		/* Gain */
		if (content->audio_gain() != 0) {
			shared_ptr<AudioBuffers> gain (new AudioBuffers (all->audio));
			gain->apply_gain (content->audio_gain ());
			all->audio = gain;
		}

		/* Remap channels */
		shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), all->audio->frames()));
		dcp_mapped->make_silent ();
		AudioMapping map = content->audio_mapping ();
		for (int i = 0; i < map.content_channels(); ++i) {
			for (int j = 0; j < _film->audio_channels(); ++j) {
				if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
					dcp_mapped->accumulate_channel (
						all->audio.get(),
						i,
						j,
						map.get (i, static_cast<dcp::Channel> (j))
						);
				}
			}
		}

		all->audio = dcp_mapped;

		audio->accumulate_frames (
			all->audio.get(),
			content_frame - all->frame,
			offset.frames (_film->audio_frame_rate()),
			min (AudioFrame (all->audio->frames()), length_frames) - offset.frames (_film->audio_frame_rate ())
			);
	}

	return audio;
}

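/** Convert a time in the DCP to a frame index within the given piece's video content */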
VideoFrame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	/* s is the offset of t from the start position of this content */
	DCPTime s = t - piece->content->position ();
	s = DCPTime (max (DCPTime::Type (0), s.get ()));
	s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));

	/* Convert this to the content frame */
	return DCPTime (s + piece->content->trim_start()).frames (_film->video_frame_rate()) * piece->frc.factor ();
}

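/** Convert a frame index within the given piece's video content to a time in the DCP */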
DCPTime
Player::content_video_to_dcp (shared_ptr<const Piece> piece, VideoFrame f) const
{
	DCPTime t = DCPTime::from_frames (f / piece->frc.factor (), _film->video_frame_rate()) - piece->content->trim_start () + piece->content->position ();
	if (t < DCPTime ()) {
		t = DCPTime ();
	}

	return t;
}

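/** Convert a time in the DCP to a frame index within the given piece's audio content */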
AudioFrame
Player::dcp_to_content_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	/* s is the offset of t from the start position of this content */
	DCPTime s = t - piece->content->position ();
	s = DCPTime (max (DCPTime::Type (0), s.get ()));
	s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));

	/* Convert this to the content frame */
	return DCPTime (s + piece->content->trim_start()).frames (_film->audio_frame_rate());
}

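/** Convert a time in the DCP to a time within the given piece's subtitle content */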
ContentTime
Player::dcp_to_content_subtitle (shared_ptr<const Piece> piece, DCPTime t) const
{
	/* s is the offset of t from the start position of this content */
	DCPTime s = t - piece->content->position ();
	s = DCPTime (max (DCPTime::Type (0), s.get ()));
	s = DCPTime (min (piece->content->length_after_trim().get(), s.get()));

	return ContentTime (s + piece->content->trim_start(), piece->frc);
}

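/** Write a summary of these statistics to the given log */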
void
PlayerStatistics::dump (shared_ptr<Log> log) const
{
	log->log (String::compose ("Video: %1 good %2 skipped %3 black %4 repeat", video.good, video.skip, video.black, video.repeat), Log::TYPE_GENERAL);
	log->log (String::compose ("Audio: %1 good %2 skipped %3 silence", audio.good, audio.skip, audio.silence.seconds()), Log::TYPE_GENERAL);
}

PlayerStatistics const &
Player::statistics () const
{
	return _statistics;
}

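/** Gather subtitles (image and text) from all subtitle content that overlaps a period,
 *  applying each content's offset and scale settings.
 *  @param time Start of the period.
 *  @param length Length of the period.
 *  @param starting Passed to the subtitle decoders when fetching subtitles for the period.
 */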
PlayerSubtitles
Player::get_subtitles (DCPTime time, DCPTime length, bool starting)
{
	list<shared_ptr<Piece> > subs = overlaps<SubtitleContent> (time, time + length);

	PlayerSubtitles ps (time, length);

	for (list<shared_ptr<Piece> >::const_iterator j = subs.begin(); j != subs.end(); ++j) {
		shared_ptr<SubtitleContent> subtitle_content = dynamic_pointer_cast<SubtitleContent> ((*j)->content);
		if (!subtitle_content->use_subtitles ()) {
			continue;
		}

		/* XXX: this will break down if we have multiple subtitle content */
		ps.language = subtitle_content->subtitle_language();
		if (ps.language.empty ()) {
			ps.language = _("Unknown");
		}

		shared_ptr<SubtitleDecoder> subtitle_decoder = dynamic_pointer_cast<SubtitleDecoder> ((*j)->decoder);
		ContentTime const from = dcp_to_content_subtitle (*j, time);
		/* XXX: this video_frame_rate() should be the rate that the subtitle content has been prepared for */
		ContentTime const to = from + ContentTime::from_frames (1, _film->video_frame_rate ());

		list<ContentImageSubtitle> image = subtitle_decoder->get_image_subtitles (ContentTimePeriod (from, to), starting);
		for (list<ContentImageSubtitle>::iterator i = image.begin(); i != image.end(); ++i) {

			/* Apply content's subtitle offsets */
			i->sub.rectangle.x += subtitle_content->subtitle_x_offset ();
			i->sub.rectangle.y += subtitle_content->subtitle_y_offset ();

			/* Apply a corrective translation to keep the subtitle centred after the scale that we are about to apply */
			i->sub.rectangle.x -= i->sub.rectangle.width * (subtitle_content->subtitle_x_scale() - 1) / 2;
			i->sub.rectangle.y -= i->sub.rectangle.height * (subtitle_content->subtitle_y_scale() - 1) / 2;

			/* Apply content's subtitle scale */
			i->sub.rectangle.width *= subtitle_content->subtitle_x_scale ();
			i->sub.rectangle.height *= subtitle_content->subtitle_y_scale ();

			ps.image.push_back (i->sub);
		}

		list<ContentTextSubtitle> text = subtitle_decoder->get_text_subtitles (ContentTimePeriod (from, to), starting);
		BOOST_FOREACH (ContentTextSubtitle& ts, text) {
			BOOST_FOREACH (dcp::SubtitleString& s, ts.subs) {
				s.set_v_position (s.v_position() + subtitle_content->subtitle_y_offset ());
				s.set_size (s.size() * max (subtitle_content->subtitle_x_scale(), subtitle_content->subtitle_y_scale()));
				ps.text.push_back (s);
			}
		}
	}

	return ps;
}