/*
- Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
#include "raw_image_proxy.h"
#include "ratio.h"
#include "log.h"
-#include "render_subtitles.h"
+#include "render_text.h"
#include "config.h"
#include "content_video.h"
#include "player_video.h"
#include "decoder.h"
#include "video_decoder.h"
#include "audio_decoder.h"
-#include "subtitle_content.h"
-#include "subtitle_decoder.h"
+#include "text_content.h"
+#include "text_decoder.h"
#include "ffmpeg_content.h"
#include "audio_content.h"
-#include "content_subtitle.h"
#include "dcp_decoder.h"
#include "image_decoder.h"
#include "compose.hpp"
+#include "shuffler.h"
#include <dcp/reel.h>
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
using boost::optional;
using boost::scoped_ptr;
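+/* Identifiers for the different properties of the player; these are passed with the
+   Changed signal so that observers can tell what has altered.
+*/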
+int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
+int const PlayerProperty::PLAYLIST = 701;
+int const PlayerProperty::FILM_CONTAINER = 702;
+int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
+int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
+
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
: _film (film)
, _playlist (playlist)
, _fast (false)
, _play_referenced (false)
, _audio_merger (_film->audio_frame_rate())
+ , _shuffler (0)
{
_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
seek (DCPTime (), true);
}
+Player::~Player ()
+{
+ delete _shuffler;
+}
+
void
Player::setup_pieces ()
{
_pieces.clear ();
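+ /* Re-create the Shuffler on each setup so that any stale 3D state is discarded; once
+ frames are back in order it passes them on to Player::video.
+ */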
+ delete _shuffler;
+ _shuffler = new Shuffler();
+ _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
+
BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
if (!i->paths_valid ()) {
}
if (decoder->video && _ignore_video) {
- decoder->video->set_ignore ();
+ decoder->video->set_ignore (true);
}
if (decoder->subtitle && _ignore_subtitle) {
- decoder->subtitle->set_ignore ();
+ decoder->subtitle->set_ignore (true);
}
shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
- if (dcp && _play_referenced) {
+ if (dcp) {
+ dcp->set_decode_referenced (_play_referenced);
if (_play_referenced) {
- dcp->set_decode_referenced ();
+ dcp->set_forced_reduction (_dcp_decode_reduction);
}
- dcp->set_forced_reduction (_dcp_decode_reduction);
}
shared_ptr<Piece> piece (new Piece (i, decoder, frc));
_pieces.push_back (piece);
if (decoder->video) {
- decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
+ if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
+ /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
+ decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
+ } else {
+ decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
+ }
}
if (decoder->audio) {
}
if (decoder->subtitle) {
- decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
- decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
- decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
+ decoder->subtitle->BitmapStart.connect (bind (&Player::bitmap_text_start, this, weak_ptr<Piece> (piece), _1));
+ decoder->subtitle->PlainStart.connect (bind (&Player::plain_text_start, this, weak_ptr<Piece> (piece), _1));
+ decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1, _2));
}
}
+ _stream_states.clear ();
BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
if (i->content->audio) {
BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
_silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
_last_video_time = DCPTime ();
+ _last_video_eyes = EYES_BOTH;
_last_audio_time = DCPTime ();
_have_valid_pieces = true;
}
property == ContentProperty::TRIM_END ||
property == ContentProperty::PATH ||
property == VideoContentProperty::FRAME_TYPE ||
+ property == VideoContentProperty::COLOUR_CONVERSION ||
+ property == AudioContentProperty::STREAMS ||
property == DCPContentProperty::NEEDS_ASSETS ||
property == DCPContentProperty::NEEDS_KDM ||
- property == SubtitleContentProperty::COLOUR ||
- property == SubtitleContentProperty::OUTLINE ||
- property == SubtitleContentProperty::SHADOW ||
- property == SubtitleContentProperty::EFFECT_COLOUR ||
+ property == TextContentProperty::COLOUR ||
+ property == TextContentProperty::EFFECT ||
+ property == TextContentProperty::EFFECT_COLOUR ||
property == FFmpegContentProperty::SUBTITLE_STREAM ||
- property == VideoContentProperty::COLOUR_CONVERSION
+ property == FFmpegContentProperty::FILTERS
) {
_have_valid_pieces = false;
- Changed (frequent);
+ Changed (property, frequent);
} else if (
- property == SubtitleContentProperty::LINE_SPACING ||
- property == SubtitleContentProperty::OUTLINE_WIDTH ||
- property == SubtitleContentProperty::Y_SCALE ||
- property == SubtitleContentProperty::FADE_IN ||
- property == SubtitleContentProperty::FADE_OUT ||
+ property == TextContentProperty::LINE_SPACING ||
+ property == TextContentProperty::OUTLINE_WIDTH ||
+ property == TextContentProperty::Y_SCALE ||
+ property == TextContentProperty::FADE_IN ||
+ property == TextContentProperty::FADE_OUT ||
property == ContentProperty::VIDEO_FRAME_RATE ||
- property == SubtitleContentProperty::USE ||
- property == SubtitleContentProperty::X_OFFSET ||
- property == SubtitleContentProperty::Y_OFFSET ||
- property == SubtitleContentProperty::X_SCALE ||
- property == SubtitleContentProperty::FONTS ||
+ property == TextContentProperty::USE ||
+ property == TextContentProperty::X_OFFSET ||
+ property == TextContentProperty::Y_OFFSET ||
+ property == TextContentProperty::X_SCALE ||
+ property == TextContentProperty::FONTS ||
property == VideoContentProperty::CROP ||
property == VideoContentProperty::SCALE ||
property == VideoContentProperty::FADE_IN ||
property == VideoContentProperty::FADE_OUT
) {
- Changed (frequent);
+ Changed (property, frequent);
}
}
_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
_black_image->make_black ();
- Changed (false);
+ Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
void
Player::playlist_changed ()
{
_have_valid_pieces = false;
- Changed (false);
+ Changed (PlayerProperty::PLAYLIST, false);
}
void
*/
if (p == Film::CONTAINER) {
- Changed (false);
+ Changed (PlayerProperty::FILM_CONTAINER, false);
} else if (p == Film::VIDEO_FRAME_RATE) {
/* Pieces contain a FrameRateChange which contains the DCP frame rate,
so we need new pieces here.
*/
_have_valid_pieces = false;
- Changed (false);
+ Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
} else if (p == Film::AUDIO_PROCESSOR) {
if (_film->audio_processor ()) {
_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
}
+ } else if (p == Film::AUDIO_CHANNELS) {
+ _audio_merger.clear ();
}
}
list<PositionImage>
-Player::transform_image_subtitles (list<ImageSubtitle> subs) const
+Player::transform_bitmap_texts (list<BitmapText> subs) const
{
list<PositionImage> all;
- for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
+ for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
if (!i->image) {
continue;
}
/* We will scale the subtitle up to fit _video_container_size */
dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
- /* Then we need a corrective translation, consisting of two parts:
- *
- * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
- * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
- *
- * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
- * (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
- * (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
- *
- * Combining these two translations gives these expressions.
- */
-
all.push_back (
PositionImage (
i->image->scale (
}
shared_ptr<PlayerVideo>
-Player::black_player_video_frame () const
+Player::black_player_video_frame (Eyes eyes) const
{
return shared_ptr<PlayerVideo> (
new PlayerVideo (
optional<double> (),
_video_container_size,
_video_container_size,
- EYES_BOTH,
+ eyes,
PART_WHOLE,
- PresetColourConversion::all().front().conversion
+ PresetColourConversion::all().front().conversion,
+ boost::weak_ptr<Content>(),
+ boost::optional<Frame>()
)
);
}
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
/* See comment in dcp_to_content_video */
- DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
- return max (DCPTime (), d + piece->content->position ());
+ DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
+ return d + piece->content->position();
}
Frame
if (_playlist->length() == DCPTime()) {
/* Special case of an empty Film; just give one black frame */
- emit_video (black_player_video_frame(), DCPTime());
+ emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
return true;
}
optional<DCPTime> earliest_time;
BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
- if (!i->done) {
- DCPTime const t = content_time_to_dcp (i, i->decoder->position());
+ if (i->done) {
+ continue;
+ }
+
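+ /* Clamp the decoder position to the content's trim start when working out this piece's earliest time */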
+ DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
+ if (t > i->content->end()) {
+ i->done = true;
+ } else {
+
/* Given two choices at the same time, pick the one with a subtitle so we see it before
the video.
*/
earliest_content->done = earliest_content->decoder->pass ();
break;
case BLACK:
- emit_video (black_player_video_frame(), _black.position());
+ emit_video (black_player_video_frame(EYES_BOTH), _black.position());
_black.set_position (_black.position() + one_video_frame());
break;
case SILENT:
{
DCPTimePeriod period (_silent.period_at_position());
+ if (_last_audio_time) {
+ /* Sometimes the thing that happened last finishes fractionally before
+ this silence. Bodge the start time of the silence to fix it. I'm
+ not sure if this is the right solution --- maybe the last thing should
+ be padded `forward' rather than this thing padding `back'.
+ */
+ period.from = min(period.from, *_last_audio_time);
+ }
if (period.duration() > one_video_frame()) {
period.to = period.from + one_video_frame();
}
/* Emit any audio that is ready */
+ /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
+ of our streams, or the position of _silent.
+ */
DCPTime pull_to = _film->length ();
for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
if (!i->second.piece->done && i->second.last_push_end < pull_to) {
pull_to = i->second.last_push_end;
}
}
+ if (!_silent.done() && _silent.position() < pull_to) {
+ pull_to = _silent.position();
+ }
list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
emit_audio (i->first, i->second);
}
+ if (done) {
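+ /* Everything has been emitted, so flush anything remaining in the Shuffler and the video delay buffer */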
+ _shuffler->flush ();
+ for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
+ do_emit_video(i->first, i->second);
+ }
+ }
+
return done;
}
{
list<PositionImage> subtitles;
- BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
+ int const vfr = _film->video_frame_rate();
+
+ BOOST_FOREACH (PlayerText i, _active_text[TEXT_SUBTITLE].get_burnt (DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_subtitles)) {
/* Image subtitles */
- list<PositionImage> c = transform_image_subtitles (i.image);
+ list<PositionImage> c = transform_bitmap_texts (i.image);
copy (c.begin(), c.end(), back_inserter (subtitles));
/* Text subtitles (rendered to an image) */
if (!i.text.empty ()) {
- list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
+ list<PositionImage> s = render_text (i.text, i.fonts, _video_container_size, time, vfr);
copy (s.begin(), s.end(), back_inserter (subtitles));
}
}
return merge (subtitles);
}
-bool
+void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
shared_ptr<Piece> piece = wp.lock ();
if (!piece) {
- return false;
+ return;
}
FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
if (frc.skip && (video.frame % 2) == 1) {
- return false;
+ return;
}
/* Time of the first frame we will emit */
DCPTime const time = content_video_to_dcp (piece, video.frame);
- /* Discard if it's outside the content's period or if it's before the last accurate seek */
- if (
- time < piece->content->position() ||
- time >= piece->content->end() ||
- (_last_video_time && time < *_last_video_time)) {
- return false;
+ /* Discard if it's before the content's period or the last accurate seek. We can't discard
+ if it's after the content's period here as in that case we still need to fill any gap between
+ `now' and the end of the content's period.
+ */
+ if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
+ return;
}
- /* Fill gaps that we discover now that we have some video which needs to be emitted */
+ /* Fill gaps that we discover now that we have some video which needs to be emitted.
+ This is where we need to fill to.
+ */
+ DCPTime fill_to = min (time, piece->content->end());
if (_last_video_time) {
- /* XXX: this may not work for 3D */
DCPTime fill_from = max (*_last_video_time, piece->content->position());
- for (DCPTime j = fill_from; j < time; j += one_video_frame()) {
- LastVideoMap::const_iterator k = _last_video.find (wp);
- if (k != _last_video.end ()) {
- emit_video (k->second, j);
- } else {
- emit_video (black_player_video_frame(), j);
+ LastVideoMap::const_iterator last = _last_video.find (wp);
+ if (_film->three_d()) {
+ DCPTime j = fill_from;
+ Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
+ if (eyes == EYES_BOTH) {
+ eyes = EYES_LEFT;
+ }
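+ /* Fill the left and right eyes alternately; the DCP time only advances after each right-eye frame */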
+ while (j < fill_to || eyes != video.eyes) {
+ if (last != _last_video.end()) {
+ shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
+ copy->set_eyes (eyes);
+ emit_video (copy, j);
+ } else {
+ emit_video (black_player_video_frame(eyes), j);
+ }
+ if (eyes == EYES_RIGHT) {
+ j += one_video_frame();
+ }
+ eyes = increment_eyes (eyes);
+ }
+ } else {
+ for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
+ if (last != _last_video.end()) {
+ emit_video (last->second, j);
+ } else {
+ emit_video (black_player_video_frame(EYES_BOTH), j);
+ }
}
}
}
_video_container_size,
video.eyes,
video.part,
- piece->content->video->colour_conversion ()
+ piece->content->video->colour_conversion(),
+ piece->content,
+ video.frame
)
);
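+ /* Emit the frame, repeated as the frame rate conversion requires, but never beyond the content's end */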
DCPTime t = time;
for (int i = 0; i < frc.repeat; ++i) {
- emit_video (_last_video[wp], t);
+ if (t < piece->content->end()) {
+ emit_video (_last_video[wp], t);
+ }
t += one_video_frame ();
}
-
- return true;
}
-/** @return Number of input frames that were `accepted'. This is the number of frames passed in
- * unless some were discarded at the end of the block.
- */
-Frame
+void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
shared_ptr<Piece> piece = wp.lock ();
if (!piece) {
- return 0;
+ return;
}
shared_ptr<AudioContent> content = piece->content->audio;
/* And the end of this block in the DCP */
DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
- /* We consider frames trimmed off the beginning to nevertheless be `accepted'; it's only frames trimmed
- off the end that are considered as discarded. This logic is necessary to ensure correct reel lengths,
- although the precise details escape me at the moment.
- */
- Frame accepted = content_audio.audio->frames();
-
/* Remove anything that comes before the start or after the end of the content */
if (time < piece->content->position()) {
pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
if (!cut.first) {
/* This audio is entirely discarded */
- return 0;
+ return;
}
content_audio.audio = cut.first;
time = cut.second;
} else if (time > piece->content->end()) {
/* Discard it all */
- return 0;
+ return;
} else if (end > piece->content->end()) {
Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
if (remaining_frames == 0) {
- return 0;
+ return;
}
shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
content_audio.audio = cut;
- accepted = content_audio.audio->frames();
}
DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
_audio_merger.push (content_audio.audio, time);
DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
- return accepted;
}
void
-Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
+Player::bitmap_text_start (weak_ptr<Piece> wp, ContentBitmapText subtitle)
{
shared_ptr<Piece> piece = wp.lock ();
if (!piece) {
subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
+ /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
+ subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((piece->content->subtitle->x_scale() - 1) / 2);
+ subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((piece->content->subtitle->y_scale() - 1) / 2);
+
/* Apply content's subtitle scale */
subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
- /* Apply a corrective translation to keep the subtitle centred after that scale */
- subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
- subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
-
- PlayerSubtitles ps;
+ PlayerText ps;
ps.image.push_back (subtitle.sub);
DCPTime from (content_time_to_dcp (piece, subtitle.from()));
- _active_subtitles.add_from (wp, ps, from);
+ _active_text[subtitle.type()].add_from (wp, ps, from);
}
void
-Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
+Player::plain_text_start (weak_ptr<Piece> wp, ContentPlainText subtitle)
{
shared_ptr<Piece> piece = wp.lock ();
if (!piece) {
return;
}
- PlayerSubtitles ps;
+ PlayerText ps;
DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
+ if (from > piece->content->end()) {
+ return;
+ }
+
BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
}
s.set_in (dcp::Time(from.seconds(), 1000));
- ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
+ ps.text.push_back (PlainText (s, piece->content->subtitle->outline_width()));
ps.add_fonts (piece->content->subtitle->fonts ());
}
- _active_subtitles.add_from (wp, ps, from);
+ _active_text[subtitle.type()].add_from (wp, ps, from);
}
void
-Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
+Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to, TextType type)
{
- if (!_active_subtitles.have (wp)) {
+ if (!_active_text[type].have (wp)) {
return;
}
DCPTime const dcp_to = content_time_to_dcp (piece, to);
- pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
+ if (dcp_to > piece->content->end()) {
+ return;
+ }
+
+ pair<PlayerText, DCPTime> from = _active_text[type].add_to (wp, dcp_to);
if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
setup_pieces ();
}
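+ /* Discard any pending 3D re-ordering state and any video frames awaiting emission */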
+ if (_shuffler) {
+ _shuffler->clear ();
+ }
+
+ _delay.clear ();
+
if (_audio_processor) {
_audio_processor->flush ();
}
_audio_merger.clear ();
- _active_subtitles.clear ();
+ for (int i = 0; i < TEXT_COUNT; ++i) {
+ _active_text[i].clear ();
+ }
BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
if (time < i->content->position()) {
- /* Before; seek to 0 */
- i->decoder->seek (ContentTime(), accurate);
+ /* Before; seek to the start of the content */
+ i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
i->done = false;
} else if (i->content->position() <= time && time < i->content->end()) {
/* During; seek to position */
if (accurate) {
_last_video_time = time;
+ _last_video_eyes = EYES_LEFT;
_last_audio_time = time;
} else {
_last_video_time = optional<DCPTime>();
+ _last_video_eyes = optional<Eyes>();
_last_audio_time = optional<DCPTime>();
}
void
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
+ /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
+ player before the video that requires them.
+ */
+ _delay.push_back (make_pair (pv, time));
+
+ if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
+ _last_video_time = time + one_video_frame();
+ }
+ _last_video_eyes = increment_eyes (pv->eyes());
+
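+ /* Hold up to three frames in the delay buffer and only emit the oldest once it is full */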
+ if (_delay.size() < 3) {
+ return;
+ }
+
+ pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
+ _delay.pop_front();
+ do_emit_video (to_do.first, to_do.second);
+}
+
+void
+Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
+{
+ if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
+ for (int i = 0; i < TEXT_COUNT; ++i) {
+ _active_text[i].clear_before (time);
+ }
+ }
+
optional<PositionImage> subtitles = subtitles_for_frame (time);
if (subtitles) {
pv->set_subtitle (subtitles.get ());
}
Video (pv, time);
-
- if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
- _last_video_time = time + one_video_frame();
- _active_subtitles.clear_before (time);
- }
}
void
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
{
+ /* Log if the assert below is about to fail */
+ if (_last_audio_time && time != *_last_audio_time) {
+ _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
+ }
+
+ /* This audio must follow on from the previous */
+ DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
Audio (data, time);
_last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
}
_dcp_decode_reduction = reduction;
_have_valid_pieces = false;
- Changed (false);
+ Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
+}
+
+DCPTime
+Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
+{
+ if (!_have_valid_pieces) {
+ setup_pieces ();
+ }
+
+ BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+ if (i->content == content) {
+ return content_time_to_dcp (i, t);
+ }
+ }
+
+ DCPOMATIC_ASSERT (false);
+ return DCPTime ();
}