#include "content_subtitle.h"
#include "dcp_decoder.h"
#include "image_decoder.h"
-#include "resampler.h"
#include "compose.hpp"
#include <dcp/reel.h>
#include <dcp/reel_sound_asset.h>
, _always_burn_subtitles (false)
, _fast (false)
, _play_referenced (false)
- , _last_seek_accurate (true)
, _audio_merger (_film->audio_frame_rate())
{
_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
}
}
- _last_video_time = optional<DCPTime> ();
- _last_audio_time = optional<DCPTime> ();
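+ /* These are the times just after the last video/audio that we emitted; start them at zero
+    so that the first pass() fills from the very start of the DCP.
+ */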
+ _last_video_time = DCPTime ();
+ _last_audio_time = DCPTime ();
_have_valid_pieces = true;
}
setup_pieces ();
}
- shared_ptr<Piece> earliest;
- DCPTime earliest_content;
-
- BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
- if (!i->done) {
- DCPTime const t = i->content->position() + DCPTime (i->decoder->position(), i->frc);
- if (!earliest || t < earliest_content) {
- earliest_content = t;
- earliest = i;
- }
- }
- }
-
- if (earliest) {
- earliest->done = earliest->decoder->pass ();
- if (earliest->done && earliest->content->audio) {
- /* Flush the Player audio system for this piece */
- BOOST_FOREACH (AudioStreamPtr i, earliest->content->audio->streams()) {
- audio_flush (earliest, i);
- }
- }
- }
-
- /* Fill towards the next thing that might happen (or the end of the playlist). This is to fill gaps between content,
- NOT to fill gaps within content (the latter is done in ::video())
- */
- DCPTime fill_towards = earliest ? earliest_content : _playlist->length();
-
- /* Work out where to fill video from */
- optional<DCPTime> video_fill_from;
- if (_last_video_time && !_playlist->video_content_at(_last_video_time.get())) {
- /* No seek; fill towards the next thing that might happen (or the end of the playlist) */
- video_fill_from = _last_video_time;
- } else if (_last_seek_time && !_playlist->video_content_at(_last_seek_time.get())) {
- /* Seek into an empty area; fill from the seek time */
- video_fill_from = _last_seek_time;
- }
-
bool filled = false;
- if (video_fill_from && ((fill_towards - video_fill_from.get())) > one_video_frame()) {
- emit_video (black_player_video_frame(), video_fill_from.get());
+ if (_last_video_time && !_playlist->video_content_at(*_last_video_time) && *_last_video_time < _playlist->length()) {
+ /* _last_video_time is the time just after the last video we emitted, and there is no video content
at this time, so we need to emit some black.
+ */
+ emit_video (black_player_video_frame(), *_last_video_time);
filled = true;
} else if (_playlist->length() == DCPTime()) {
/* Special case of an empty Film; just give one black frame */
emit_video (black_player_video_frame(), DCPTime());
filled = true;
}
- optional<DCPTime> audio_fill_from;
- if (_last_audio_time && !_playlist->audio_content_at(_last_audio_time.get())) {
- /* No seek; fill from the last thing that happened */
- audio_fill_from = _last_audio_time;
- } else if (_last_seek_time && !_playlist->audio_content_at(_last_seek_time.get())) {
- /* Seek into an empty area; fill from the seek time */
- audio_fill_from = _last_seek_time;
- }
-
- if (audio_fill_from && audio_fill_from < fill_towards) {
- DCPTimePeriod period (audio_fill_from.get(), fill_towards);
+ if (_last_audio_time && !_playlist->audio_content_at(*_last_audio_time) && *_last_audio_time < _playlist->length()) {
+ /* _last_audio_time is the time just after the last audio we emitted. There is no audio here,
+ so we need to emit some silence.
+ */
+ shared_ptr<Content> next = _playlist->next_audio_content(*_last_audio_time);
+ DCPTimePeriod period (*_last_audio_time, next ? next->position() : _playlist->length());
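+ /* Cap the silence emitted in a single pass() to one video frame's worth, presumably so that
+    each pass() does a roughly frame-sized amount of work.
+ */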
if (period.duration() > one_video_frame()) {
- period.to = period.from + one_video_frame();
+ period = DCPTimePeriod (*_last_audio_time, *_last_audio_time + one_video_frame());
}
fill_audio (period);
filled = true;
}
- if (!earliest && !filled) {
- return true;
+ /* Now pass() the decoder which is farthest behind where we are */
+
+ shared_ptr<Piece> earliest;
+ DCPTime earliest_content;
+
+ BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+ if (!i->done) {
+ DCPTime const t = content_time_to_dcp (i, i->decoder->position());
+ if (!earliest || t < earliest_content) {
+ earliest_content = t;
+ earliest = i;
+ }
+ }
+ }
+
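+ /* Only pass() a decoder if we did not emit any black or silence above, presumably so that
+    each call to pass() produces a bounded amount of output.
+ */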
+ if (!filled && earliest) {
+ earliest->done = earliest->decoder->pass ();
}
/* Emit any audio that is ready */
- DCPTime pull_from = _playlist->length ();
+ DCPTime pull_to = _playlist->length ();
for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
- if (!i->second.piece->done && i->second.last_push_end < pull_from) {
- pull_from = i->second.last_push_end;
+ if (!i->second.piece->done && i->second.last_push_end < pull_to) {
+ pull_to = i->second.last_push_end;
}
}
- list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_from);
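+ /* pull_to is now the earliest time up to which every still-active stream has pushed audio,
+    so everything in the merger before it is complete and safe to emit.
+ */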
+ list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
- if (_last_audio_time && i->second < _last_audio_time.get()) {
+ if (_last_audio_time && i->second < *_last_audio_time) {
/* There has been an accurate seek and we have received some audio before the seek time;
discard it.
*/
+ continue;
}
if (_last_audio_time) {
- fill_audio (DCPTimePeriod (_last_audio_time.get(), i->second));
+ fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
}
emit_audio (i->first, i->second);
}
- return false;
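+ /* Return true only when there is nothing left to do: every piece's decoder has finished
+    and we did not emit any black or silence this time.
+ */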
+ return !earliest && !filled;
}
optional<PositionImage>
DCPTime const time = content_video_to_dcp (piece, video.frame);
DCPTimePeriod const period (time, time + one_video_frame());
- /* Discard if it's outside the content's period or if it's before the last accurate seek */
- if (
- time < piece->content->position() ||
- time >= piece->content->end() ||
- (_last_seek_time && _last_seek_accurate && time < _last_seek_time.get())) {
- return;
- }
-
- /* Fill gaps caused by (the hopefully rare event of) a decoder not emitting contiguous video. We have to do this here
- as in the problematic case we are about to emit a frame which is not contiguous with the previous.
- */
+ /* Fill gaps that we discover now that we have some video which needs to be emitted */
+ optional<DCPTime> fill_to;
if (_last_video_time) {
+ fill_to = _last_video_time;
+ }
+
+ if (fill_to) {
/* XXX: this may not work for 3D */
- BOOST_FOREACH (DCPTimePeriod i, subtract(DCPTimePeriod (_last_video_time.get(), time), _no_video)) {
+ BOOST_FOREACH (DCPTimePeriod i, subtract(DCPTimePeriod (*fill_to, time), _no_video)) {
for (DCPTime j = i.from; j < i.to; j += one_video_frame()) {
- if (_last_video) {
- emit_video (shared_ptr<PlayerVideo> (new PlayerVideo (*_last_video)), j);
+ LastVideoMap::const_iterator k = _last_video.find (wp);
+ if (k != _last_video.end ()) {
+ emit_video (k->second, j);
} else {
emit_video (black_player_video_frame(), j);
}
}
}
- _last_video.reset (
+ /* Discard if it's outside the content's period or if it's before the last accurate seek */
+ if (
+ time < piece->content->position() ||
+ time >= piece->content->end() ||
+ (_last_video_time && time < *_last_video_time)) {
+ return;
+ }
+
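+ /* Remember this frame against its piece (wp) so that later gap-filling repeats this piece's
+    own last frame rather than one belonging to a different piece.
+ */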
+ _last_video[wp].reset (
new PlayerVideo (
video.image,
piece->content->video->crop (),
)
);
- emit_video (_last_video, time);
-}
-
-void
-Player::audio_flush (shared_ptr<Piece> piece, AudioStreamPtr stream)
-{
- shared_ptr<AudioContent> content = piece->content->audio;
- DCPOMATIC_ASSERT (content);
-
- shared_ptr<Resampler> r = resampler (content, stream, false);
- if (!r) {
- return;
- }
-
- pair<shared_ptr<const AudioBuffers>, Frame> ro = r->flush ();
- if (ro.first->frames() == 0) {
- return;
- }
-
- ContentAudio content_audio;
- content_audio.audio = ro.first;
- content_audio.frame = ro.second;
-
- /* Compute time in the DCP */
- DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
-
- audio_transform (content, stream, content_audio, time);
+ emit_video (_last_video[wp], time);
}
/** Do our common processing on some audio */
/* Remap */
- shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), content_audio.audio->frames()));
- dcp_mapped->make_silent ();
-
- AudioMapping map = stream->mapping ();
- for (int i = 0; i < map.input_channels(); ++i) {
- for (int j = 0; j < dcp_mapped->channels(); ++j) {
- if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
- dcp_mapped->accumulate_channel (
- content_audio.audio.get(),
- i,
- static_cast<dcp::Channel> (j),
- map.get (i, static_cast<dcp::Channel> (j))
- );
- }
- }
- }
-
- content_audio.audio = dcp_mapped;
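+ /* remap() mixes the content channels into the film's DCP channel layout according to the
+    stream's AudioMapping.
+ */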
+ content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
/* Process */
shared_ptr<AudioContent> content = piece->content->audio;
DCPOMATIC_ASSERT (content);
- /* Resample */
- if (stream->frame_rate() != content->resampled_frame_rate()) {
- shared_ptr<Resampler> r = resampler (content, stream, true);
- pair<shared_ptr<const AudioBuffers>, Frame> ro = r->run (content_audio.audio, content_audio.frame);
- if (ro.first->frames() == 0) {
- return;
- }
- content_audio.audio = ro.first;
- content_audio.frame = ro.second;
- }
-
/* Compute time in the DCP */
DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
/* And the end of this block in the DCP */
_audio_processor->flush ();
}
- for (ResamplerMap::iterator i = _resamplers.begin(); i != _resamplers.end(); ++i) {
- i->second->flush ();
- i->second->reset ();
- }
-
_audio_merger.clear ();
_active_subtitles.clear ();
}
}
- _last_video_time = optional<DCPTime> ();
- _last_audio_time = optional<DCPTime> ();
- _last_seek_time = time;
- _last_seek_accurate = accurate;
-}
-
-shared_ptr<Resampler>
-Player::resampler (shared_ptr<const AudioContent> content, AudioStreamPtr stream, bool create)
-{
- ResamplerMap::const_iterator i = _resamplers.find (make_pair (content, stream));
- if (i != _resamplers.end ()) {
- return i->second;
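+ /* After an accurate seek we know exactly where emission will resume, so video and audio
+    arriving before this time can be discarded; after an inaccurate seek we do not know what
+    the decoders will give us first, so leave the times unset.
+ */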
+ if (accurate) {
+ _last_video_time = time;
+ _last_audio_time = time;
+ } else {
+ _last_video_time = optional<DCPTime>();
+ _last_audio_time = optional<DCPTime>();
}
- if (!create) {
- return shared_ptr<Resampler> ();
- }
-
- LOG_GENERAL (
- "Creating new resampler from %1 to %2 with %3 channels",
- stream->frame_rate(),
- content->resampled_frame_rate(),
- stream->channels()
- );
-
- shared_ptr<Resampler> r (
- new Resampler (stream->frame_rate(), content->resampled_frame_rate(), stream->channels())
- );
-
- _resamplers[make_pair(content, stream)] = r;
- return r;
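+ /* Forget any stored last frames so that gap-filling after the seek cannot repeat video from
+    before it.
+ */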
+ _last_video.clear ();
}
void
if (subtitles) {
pv->set_subtitle (subtitles.get ());
}
+
Video (pv, time);
- _last_video_time = time + one_video_frame();
- _active_subtitles.clear_before (time);
+
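+ /* In 3D, the left and right eyes are emitted as separate frames at the same DCP time, so only
+    advance our position once the right eye (or a 2D frame) has gone out.
+ */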
+ if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
+ _last_video_time = time + one_video_frame();
+ _active_subtitles.clear_before (time);
+ }
}
void
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
{
Audio (data, time);
- _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate ());
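+ /* As with video, _last_audio_time is the time just after the last audio that we emitted */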
+ _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
}
void
Player::fill_audio (DCPTimePeriod period)
{
+ if (period.from == period.to) {
+ return;
+ }
+
+ DCPOMATIC_ASSERT (period.from < period.to);
+
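+ /* _no_audio holds the periods which the player must not fill, presumably because their audio
+    comes from referenced DCP reels; fill silence only outside them.
+ */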
BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_audio)) {
DCPTime t = i.from;
while (t < i.to) {