- if (have_audio && have_video) {
- _pts_offset = - min (c->first_video().get(), c->audio_stream()->first_audio.get());
- } else if (have_video) {
- _pts_offset = - c->first_video().get();
- } else if (have_audio) {
- _pts_offset = - c->audio_stream()->first_audio.get();
+ vector<shared_ptr<FFmpegAudioStream> > streams = c->ffmpeg_audio_streams ();
+
+ _pts_offset = ContentTime::min ();
+
+ if (c->first_video ()) {
+ _pts_offset = - c->first_video().get ();
+ }
+
+ BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, streams) {
+ if (i->first_audio) {
+ _pts_offset = max (_pts_offset, - i->first_audio.get ());
+ }
+ }
+
+ /* If _pts_offset is positive we would be pushing things from a -ve PTS to be played.
+ I don't think we ever want to do that, as it seems things at -ve PTS are not meant
+ to be seen (used for alignment bars etc.); see mantis #418.
+ */
+ if (_pts_offset > ContentTime ()) {
+ _pts_offset = ContentTime ();