/*
    Copyright (C) 2013 Carl Hetherington <cth@carlh.net>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#include <stdint.h>
#include "player.h"
#include "film.h"
#include "ffmpeg_decoder.h"
#include "ffmpeg_content.h"
#include "still_image_decoder.h"
#include "still_image_content.h"
#include "moving_image_decoder.h"
#include "moving_image_content.h"
#include "sndfile_decoder.h"
#include "sndfile_content.h"
#include "subtitle_content.h"
#include "playlist.h"
#include "job.h"
#include "image.h"
#include "ratio.h"
#include "resampler.h"
#include "log.h"
#include "scaler.h"

using std::list;
using std::cout;
using std::min;
using std::max;
using std::vector;
using std::pair;
using std::map;
using boost::shared_ptr;
using boost::weak_ptr;
using boost::dynamic_pointer_cast;

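/** A piece of content, with its decoder and the timeline positions up to which
 *  its video and audio have been considered.
 */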
class Piece
{
public:
	Piece (shared_ptr<Content> c)
		: content (c)
		, video_position (c->position ())
		, audio_position (c->position ())
	{}

	Piece (shared_ptr<Content> c, shared_ptr<Decoder> d)
		: content (c)
		, decoder (d)
		, video_position (c->position ())
		, audio_position (c->position ())
	{}

	shared_ptr<Content> content;
	shared_ptr<Decoder> decoder;
	Time video_position;
	Time audio_position;
};

Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
	: _film (f)
	, _playlist (p)
	, _video (true)
	, _audio (true)
	, _have_valid_pieces (false)
	, _video_position (0)
	, _audio_position (0)
	, _audio_merger (f->audio_channels(), bind (&Film::time_to_audio_frames, f.get(), _1), bind (&Film::audio_frames_to_time, f.get(), _1))
	, _last_emit_was_black (false)
{
	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
	set_video_container_size (fit_ratio_within (_film->container()->ratio (), _film->full_frame ()));
}

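/** Tell the player not to produce any video output */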
void
Player::disable_video ()
{
	_video = false;
}

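/** Tell the player not to produce any audio output */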
void
Player::disable_audio ()
{
	_audio = false;
}

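/** Pass data from the piece which is farthest behind, emitting black or silence to
 *  fill any gaps, and then emit any audio that the merger has completed.
 *  @return true if the playlist has finished and the output has been flushed.
 */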
bool
Player::pass ()
{
	if (!_have_valid_pieces) {
		setup_pieces ();
		_have_valid_pieces = true;
	}

	Time earliest_t = TIME_MAX;
	shared_ptr<Piece> earliest;
	enum {
		VIDEO,
		AUDIO
	} type = VIDEO;

	for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
		if ((*i)->decoder->done ()) {
			continue;
		}

		if (_video && dynamic_pointer_cast<VideoDecoder> ((*i)->decoder)) {
			if ((*i)->video_position < earliest_t) {
				earliest_t = (*i)->video_position;
				earliest = *i;
				type = VIDEO;
			}
		}

		if (_audio && dynamic_pointer_cast<AudioDecoder> ((*i)->decoder)) {
			if ((*i)->audio_position < earliest_t) {
				earliest_t = (*i)->audio_position;
				earliest = *i;
				type = AUDIO;
			}
		}
	}

	if (!earliest) {
		flush ();
		return true;
	}

	switch (type) {
	case VIDEO:
		if (earliest_t > _video_position) {
			emit_black ();
		} else {
			earliest->decoder->pass ();
		}
		break;

	case AUDIO:
		if (earliest_t > _audio_position) {
			emit_silence (_film->time_to_audio_frames (earliest_t - _audio_position));
		} else {
			earliest->decoder->pass ();

			if (earliest->decoder->done()) {
				shared_ptr<AudioContent> ac = dynamic_pointer_cast<AudioContent> (earliest->content);
				assert (ac);
				shared_ptr<Resampler> re = resampler (ac, false);
				if (re) {
					shared_ptr<const AudioBuffers> b = re->flush ();
					if (b->frames ()) {
						process_audio (earliest, b, ac->audio_length ());
					}
				}
			}
		}
		break;
	}

	if (_audio) {
		Time audio_done_up_to = TIME_MAX;
		for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
			if (dynamic_pointer_cast<AudioDecoder> ((*i)->decoder)) {
				audio_done_up_to = min (audio_done_up_to, (*i)->audio_position);
			}
		}

		TimedAudioBuffers<Time> tb = _audio_merger.pull (audio_done_up_to);
		Audio (tb.audio, tb.time);
		_audio_position += _film->audio_frames_to_time (tb.audio->frames ());
	}

	return false;
}

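/** Handle a video frame from a decoder: apply frame-rate conversion and trims, crop and
 *  scale the image, overlay any current subtitle, pad to the container size and emit the
 *  result via the Video signal.
 */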
void
Player::process_video (weak_ptr<Piece> weak_piece, shared_ptr<const Image> image, Eyes eyes, bool same, VideoContent::Frame frame)
{
	/* Keep a note of what came in so that we can repeat it if required */
	_last_process_video.weak_piece = weak_piece;
	_last_process_video.image = image;
	_last_process_video.eyes = eyes;
	_last_process_video.same = same;
	_last_process_video.frame = frame;

	shared_ptr<Piece> piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
	assert (content);

	FrameRateConversion frc (content->video_frame_rate(), _film->video_frame_rate());
	if (frc.skip && (frame % 2) == 1) {
		return;
	}

	Time const relative_time = (frame * frc.factor() * TIME_HZ / _film->video_frame_rate());
	if (content->trimmed (relative_time)) {
		return;
	}

	/* Convert to RGB first, as FFmpeg doesn't seem to like handling YUV images with odd widths */
	shared_ptr<Image> work_image = image->scale (image->size (), _film->scaler(), PIX_FMT_RGB24, true);

	work_image = work_image->crop (content->crop(), true);

	float const ratio = content->ratio() ? content->ratio()->ratio() : content->video_size_after_crop().ratio();
	libdcp::Size image_size = fit_ratio_within (ratio, _video_container_size);

	work_image = work_image->scale (image_size, _film->scaler(), PIX_FMT_RGB24, true);

	Time time = content->position() + relative_time - content->trim_start ();

	if (_film->with_subtitles () && _out_subtitle.image && time >= _out_subtitle.from && time <= _out_subtitle.to) {
		work_image->alpha_blend (_out_subtitle.image, _out_subtitle.position);
	}

	if (image_size != _video_container_size) {
		assert (image_size.width <= _video_container_size.width);
		assert (image_size.height <= _video_container_size.height);
		shared_ptr<Image> im (new Image (PIX_FMT_RGB24, _video_container_size, true));
		im->make_black ();
		im->copy (work_image, Position<int> ((_video_container_size.width - image_size.width) / 2, (_video_container_size.height - image_size.height) / 2));
		work_image = im;
	}

#ifdef DCPOMATIC_DEBUG
	_last_video = piece->content;
#endif

	Video (work_image, eyes, content->colour_conversion(), same, time);
	time += TIME_HZ / _film->video_frame_rate();

	if (frc.repeat) {
		Video (work_image, eyes, content->colour_conversion(), true, time);
		time += TIME_HZ / _film->video_frame_rate();
	}

	_last_emit_was_black = false;

	_video_position = piece->video_position = time;
}

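/** Handle audio from a decoder: apply gain, resample to the output rate if required,
 *  honour trims and audio delay, remap channels to the DCP layout and push the result
 *  into the audio merger.
 */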
void
Player::process_audio (weak_ptr<Piece> weak_piece, shared_ptr<const AudioBuffers> audio, AudioContent::Frame frame)
{
	shared_ptr<Piece> piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> (piece->content);
	assert (content);

	/* Gain */
	if (content->audio_gain() != 0) {
		shared_ptr<AudioBuffers> gain (new AudioBuffers (audio));
		gain->apply_gain (content->audio_gain ());
		audio = gain;
	}

	/* Resample */
	if (content->content_audio_frame_rate() != content->output_audio_frame_rate()) {
		shared_ptr<Resampler> r = resampler (content, true);
		pair<shared_ptr<const AudioBuffers>, AudioContent::Frame> ro = r->run (audio, frame);
		audio = ro.first;
		frame = ro.second;
	}

	Time const relative_time = _film->audio_frames_to_time (frame);

	if (content->trimmed (relative_time)) {
		return;
	}

	Time time = content->position() + (content->audio_delay() * TIME_HZ / 1000) + relative_time;

	/* Remap channels */
	shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->frames()));
	dcp_mapped->make_silent ();
	list<pair<int, libdcp::Channel> > map = content->audio_mapping().content_to_dcp ();
	for (list<pair<int, libdcp::Channel> >::iterator i = map.begin(); i != map.end(); ++i) {
		if (i->first < audio->channels() && i->second < dcp_mapped->channels()) {
			dcp_mapped->accumulate_channel (audio.get(), i->first, i->second);
		}
	}

	audio = dcp_mapped;

	/* Cut off anything that comes before the start of the timeline (i.e. before time zero) */
	if (time < 0) {
		int const frames = - time * _film->audio_frame_rate() / TIME_HZ;
		if (frames >= audio->frames ()) {
			return;
		}

		shared_ptr<AudioBuffers> trimmed (new AudioBuffers (audio->channels(), audio->frames() - frames));
		trimmed->copy_from (audio.get(), audio->frames() - frames, frames, 0);

		audio = trimmed;
		time = 0;
	}

	_audio_merger.push (audio, time);
	piece->audio_position += _film->audio_frames_to_time (audio->frames ());
}

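/** Flush any remaining audio from the merger, then pad with black or silence so that
 *  the video and audio outputs finish at the same time.
 */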
void
Player::flush ()
{
	TimedAudioBuffers<Time> tb = _audio_merger.flush ();
	if (tb.audio) {
		Audio (tb.audio, tb.time);
		_audio_position += _film->audio_frames_to_time (tb.audio->frames ());
	}

	while (_video_position < _audio_position) {
		emit_black ();
	}

	while (_audio_position < _video_position) {
		emit_silence (_film->time_to_audio_frames (_video_position - _audio_position));
	}
}

/** Seek so that the next pass() will yield (approximately) the requested time.
 *  Pass accurate = true to try harder to get close to the request.
 */
void
Player::seek (Time t, bool accurate)
{
	if (!_have_valid_pieces) {
		setup_pieces ();
		_have_valid_pieces = true;
	}

	if (_pieces.empty ()) {
		return;
	}

	for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
		shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> ((*i)->content);
		if (!vc) {
			continue;
		}

		Time s = t - vc->position ();
		s = max (static_cast<Time> (0), s);
		s = min (vc->length_after_trim(), s);

		(*i)->video_position = (*i)->audio_position = vc->position() + s;

		FrameRateConversion frc (vc->video_frame_rate(), _film->video_frame_rate());
		/* Here we are converting from time (in the DCP) to a frame number in the content.
		   Hence we need to use the DCP's frame rate and the double/skip correction, not
		   the source's rate.
		*/
		VideoContent::Frame f = (s + vc->trim_start ()) * _film->video_frame_rate() / (frc.factor() * TIME_HZ);
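		/* For example (illustrative numbers only): with a DCP at 24fps and no skip/repeat
		   (frc.factor() == 1), a value of s + trim_start equal to 2 * TIME_HZ (two seconds)
		   gives f = 2 * TIME_HZ * 24 / (1 * TIME_HZ) = 48 frames.
		*/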
		dynamic_pointer_cast<VideoDecoder>((*i)->decoder)->seek (f, accurate);
	}

	_video_position = _audio_position = t;

	/* XXX: don't seek audio because we don't need to... */
}

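/** Rebuild the list of Pieces from the playlist, creating a suitable decoder for each
 *  piece of content and connecting its output signals to this player.
 */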
void
Player::setup_pieces ()
{
	list<shared_ptr<Piece> > old_pieces = _pieces;

	_pieces.clear ();

	ContentList content = _playlist->content ();
	sort (content.begin(), content.end(), ContentSorter ());

	for (ContentList::iterator i = content.begin(); i != content.end(); ++i) {

		shared_ptr<Piece> piece (new Piece (*i));

		/* XXX: into content? */

		shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
		if (fc) {
			shared_ptr<FFmpegDecoder> fd (new FFmpegDecoder (_film, fc, _video, _audio));

			fd->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
			fd->Audio.connect (bind (&Player::process_audio, this, piece, _1, _2));
			fd->Subtitle.connect (bind (&Player::process_subtitle, this, piece, _1, _2, _3, _4));

			piece->decoder = fd;
		}

		shared_ptr<const StillImageContent> ic = dynamic_pointer_cast<const StillImageContent> (*i);
		if (ic) {
			shared_ptr<StillImageDecoder> id;

			/* See if we can re-use an old StillImageDecoder */
			for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
				shared_ptr<StillImageDecoder> imd = dynamic_pointer_cast<StillImageDecoder> ((*j)->decoder);
				if (imd && imd->content() == ic) {
					id = imd;
				}
			}

			if (!id) {
				id.reset (new StillImageDecoder (_film, ic));
				id->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
			}

			piece->decoder = id;
		}

		shared_ptr<const MovingImageContent> mc = dynamic_pointer_cast<const MovingImageContent> (*i);
		if (mc) {
			shared_ptr<MovingImageDecoder> md;

			if (!md) {
				md.reset (new MovingImageDecoder (_film, mc));
				md->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
			}

			piece->decoder = md;
		}

		shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
		if (sc) {
			shared_ptr<AudioDecoder> sd (new SndfileDecoder (_film, sc));
			sd->Audio.connect (bind (&Player::process_audio, this, piece, _1, _2));

			piece->decoder = sd;
		}

		_pieces.push_back (piece);
	}
}

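/** Called when a property of some content in the playlist has changed */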
void
Player::content_changed (weak_ptr<Content> w, int property, bool frequent)
{
	shared_ptr<Content> c = w.lock ();
	if (!c) {
		return;
	}

	if (
		property == ContentProperty::POSITION || property == ContentProperty::LENGTH ||
		property == ContentProperty::TRIM_START || property == ContentProperty::TRIM_END
		) {

		_have_valid_pieces = false;
		Changed (frequent);

	} else if (property == SubtitleContentProperty::SUBTITLE_OFFSET || property == SubtitleContentProperty::SUBTITLE_SCALE) {

		update_subtitle ();
		Changed (frequent);

	} else if (
		property == VideoContentProperty::VIDEO_FRAME_TYPE || property == VideoContentProperty::VIDEO_CROP ||
		property == VideoContentProperty::VIDEO_RATIO
		) {

		Changed (frequent);
	}
}

void
Player::playlist_changed ()
{
	_have_valid_pieces = false;
	Changed (false);
}

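/** Set the size of the container into which video will be scaled, and rebuild the
 *  black frame to match.
 */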
void
Player::set_video_container_size (libdcp::Size s)
{
	_video_container_size = s;
	_black_frame.reset (new Image (PIX_FMT_RGB24, _video_container_size, true));
	_black_frame->make_black ();
}

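/** Find the Resampler for some audio content.
 *  @param create true to create a new Resampler if one does not already exist.
 */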
shared_ptr<Resampler>
Player::resampler (shared_ptr<AudioContent> c, bool create)
{
	map<shared_ptr<AudioContent>, shared_ptr<Resampler> >::iterator i = _resamplers.find (c);
	if (i != _resamplers.end ()) {
		return i->second;
	}

	if (!create) {
		return shared_ptr<Resampler> ();
	}

	shared_ptr<Resampler> r (new Resampler (c->content_audio_frame_rate(), c->output_audio_frame_rate(), c->audio_channels()));
	_resamplers[c] = r;
	return r;
}

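/** Emit one frame of black at the current video position */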
void
Player::emit_black ()
{
#ifdef DCPOMATIC_DEBUG
	_last_video.reset ();
#endif

	Video (_black_frame, EYES_BOTH, ColourConversion(), _last_emit_was_black, _video_position);
	_video_position += _film->video_frames_to_time (1);
	_last_emit_was_black = true;
}

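/** Emit silence at the current audio position.
 *  @param most Maximum number of frames to emit; at most half a second is emitted per call.
 */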
void
Player::emit_silence (OutputAudioFrame most)
{
	if (most == 0) {
		return;
	}

	OutputAudioFrame N = min (most, _film->audio_frame_rate() / 2);
	shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), N));
	silence->make_silent ();
	Audio (silence, _audio_position);
	_audio_position += _film->audio_frames_to_time (N);
}

void
Player::film_changed (Film::Property p)
{
	/* Here we should notice Film properties that affect our output, and
	   alert listeners that our output now would be different to how it was
	   last time we were run.
	*/

	if (p == Film::SCALER || p == Film::WITH_SUBTITLES || p == Film::CONTAINER) {
		Changed (false);
	}
}

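/** Handle a subtitle image from a decoder: note it, along with its rectangle and
 *  display times, and recompute the outgoing subtitle.
 */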
void
Player::process_subtitle (weak_ptr<Piece> weak_piece, shared_ptr<Image> image, dcpomatic::Rect<double> rect, Time from, Time to)
{
	_in_subtitle.piece = weak_piece;
	_in_subtitle.image = image;
	_in_subtitle.rect = rect;
	_in_subtitle.from = from;
	_in_subtitle.to = to;

	update_subtitle ();
}

void
Player::update_subtitle ()
{
	shared_ptr<Piece> piece = _in_subtitle.piece.lock ();
	if (!piece) {
		return;
	}

	if (!_in_subtitle.image) {
		_out_subtitle.image.reset ();
		return;
	}

	shared_ptr<SubtitleContent> sc = dynamic_pointer_cast<SubtitleContent> (piece->content);
	assert (sc);

	dcpomatic::Rect<double> in_rect = _in_subtitle.rect;
	libdcp::Size scaled_size;

	in_rect.y += sc->subtitle_offset ();

	/* We will scale the subtitle up to fit _video_container_size, and also by the additional subtitle_scale */
	scaled_size.width = in_rect.width * _video_container_size.width * sc->subtitle_scale ();
	scaled_size.height = in_rect.height * _video_container_size.height * sc->subtitle_scale ();

	/* Then we need a corrective translation, consisting of two parts:
	 *
	 * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
	 *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
	 *
	 * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
	 *     (width_before_subtitle_scale * (1 - subtitle_scale) / 2) and
	 *     (height_before_subtitle_scale * (1 - subtitle_scale) / 2).
	 *
	 * Combining these two translations gives these expressions.
	 */
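	/* A small worked example with illustrative numbers: for a container 1998 pixels wide, an
	 * incoming rect with x = 0.1 and width = 0.8, and subtitle_scale = 0.9, the x position is
	 * rint (1998 * (0.1 + 0.8 * (1 - 0.9) / 2)) = rint (1998 * 0.14) = 280.
	 */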

	_out_subtitle.position.x = rint (_video_container_size.width * (in_rect.x + (in_rect.width * (1 - sc->subtitle_scale ()) / 2)));
	_out_subtitle.position.y = rint (_video_container_size.height * (in_rect.y + (in_rect.height * (1 - sc->subtitle_scale ()) / 2)));

	_out_subtitle.image = _in_subtitle.image->scale (
		scaled_size,
		Scaler::from_id ("bicubic"),
		_in_subtitle.image->pixel_format (),
		true
		);
	_out_subtitle.from = _in_subtitle.from + piece->content->position ();
	_out_subtitle.to = _in_subtitle.to + piece->content->position ();
}

/** Re-emit the last frame that was emitted, using current settings for crop, ratio, scaler and subtitles.
 *  @return false if this could not be done.
 */
bool
Player::repeat_last_video ()
{
	if (!_last_process_video.image) {
		return false;
	}

	process_video (
		_last_process_video.weak_piece,
		_last_process_video.image,
		_last_process_video.eyes,
		_last_process_video.same,
		_last_process_video.frame
		);

	return true;
}