+ /* NOTE(review): this is the interior of a larger method (the enclosing
+    function is not visible in this hunk).  It flushes merged audio and then
+    emits the earliest peeked DecodedThing, with special handling to keep
+    video frame-consecutive.
+ */
+ if (earliest_audio != TIME_MAX) {
+ /* There is pending audio: pull everything up to the earliest audio time
+    from the merger and emit it.  The time is clamped to 0 so we never ask
+    the merger for a negative DCP time.
+ */
+ TimedAudioBuffers<DCPTime> tb = _audio_merger.pull (max (int64_t (0), earliest_audio));
+ Audio (tb.audio, tb.time);
+ /* This assumes that the audio_frames_to_time conversion is exact
+ so that there are no accumulated errors caused by rounding.
+ */
+ _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
+ }
+
+ /* Emit the earliest thing */
+
+ /* Work out what kind of thing we peeked; at most one of these downcasts
+    will succeed, leaving the others null.
+ */
+ shared_ptr<DecodedVideo> dv = dynamic_pointer_cast<DecodedVideo> (earliest_decoded);
+ shared_ptr<DecodedAudio> da = dynamic_pointer_cast<DecodedAudio> (earliest_decoded);
+ shared_ptr<DecodedSubtitle> ds = dynamic_pointer_cast<DecodedSubtitle> (earliest_decoded);
+
+ /* Will be set to false if we shouldn't consume the peeked DecodedThing */
+ bool consume = true;
+
+ /* This is the margin either side of _{video,audio}_position that we will accept
+ as a starting point for a frame consecutive to the previous.
+ (Half of one video frame period, expressed in TIME_HZ units.)
+ */
+ DCPTime const margin = TIME_HZ / (2 * _film->video_frame_rate ());
+
+ if (dv && _video) {
+
+ if (_just_did_inaccurate_seek) {
+
+ /* Just emit; no subtlety */
+ emit_video (earliest_piece, dv);
+ step_video_position (dv);
+
+ } else if (dv->dcp_time - _video_position > margin) {
+
+ /* Too far ahead */
+
+ /* Look for a piece whose period contains _video_position, to decide
+    whether we are in a gap between content or inside some video.
+    NOTE(review): the `>=` on position() means a piece starting exactly
+    at _video_position is treated as NOT containing it — confirm this
+    matches the position()/end() containment convention used elsewhere.
+ */
+ list<shared_ptr<Piece> >::iterator i = _pieces.begin();
+ while (i != _pieces.end() && ((*i)->content->position() >= _video_position || _video_position >= (*i)->content->end())) {
+ ++i;
+ }
+
+ if (i == _pieces.end() || !_last_incoming_video.video || !_have_valid_pieces) {
+ /* We're outside all video content */
+ emit_black ();
+ } else {
+ /* We're inside some video; repeat the frame */
+ _last_incoming_video.video->dcp_time = _video_position;
+ emit_video (_last_incoming_video.weak_piece, _last_incoming_video.video);
+ step_video_position (_last_incoming_video.video);
+ }
+
+ /* We emitted filler (black or a repeated frame) instead of the peeked
+    frame, so leave the peeked frame unconsumed for a later pass.
+ */
+ consume = false;
+
+ } else if (abs (dv->dcp_time - _video_position) < margin) {
+ /* We're ok */
+ emit_video (earliest_piece, dv);
+ step_video_position (dv);