Reasonably straightforward stuff; main things are adding
diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc
index 6f91922eb90e8faa786c3b8758a285277a3f3cc2..5240decb24dfd69e74650b31a2de3b13ab8c8f18 100644
--- a/src/lib/ffmpeg_decoder.cc
+++ b/src/lib/ffmpeg_decoder.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2012-2015 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2012-2016 Carl Hetherington <cth@carlh.net>
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
 #include "ffmpeg_decoder.h"
 #include "ffmpeg_audio_stream.h"
 #include "ffmpeg_subtitle_stream.h"
-#include "filter_graph.h"
+#include "video_filter_graph.h"
 #include "audio_buffers.h"
 #include "ffmpeg_content.h"
 #include "raw_image_proxy.h"
 #include "film.h"
-#include "timer.h"
+#include "md5_digester.h"
+#include "compose.hpp"
+#include <dcp/subtitle_string.h>
+#include <sub/ssa_reader.h>
+#include <sub/subtitle.h>
+#include <sub/collect.h>
 extern "C" {
 #include <libavcodec/avcodec.h>
 #include <libavformat/avformat.h>
 }
 #include <boost/foreach.hpp>
-#include <stdexcept>
+#include <boost/algorithm/string.hpp>
 #include <vector>
 #include <iomanip>
 #include <iostream>
 #include <stdint.h>
-#include <sndfile.h>
 
 #include "i18n.h"
 
-#define LOG_GENERAL(...) _video_content->film()->log()->log (String::compose (__VA_ARGS__), Log::TYPE_GENERAL);
-#define LOG_ERROR(...) _video_content->film()->log()->log (String::compose (__VA_ARGS__), Log::TYPE_ERROR);
-#define LOG_WARNING_NC(...) _video_content->film()->log()->log (__VA_ARGS__, Log::TYPE_WARNING);
-#define LOG_WARNING(...) _video_content->film()->log()->log (String::compose (__VA_ARGS__), Log::TYPE_WARNING);
+#define LOG_GENERAL(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
+#define LOG_ERROR(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_ERROR);
+#define LOG_WARNING_NC(...) _log->log (__VA_ARGS__, LogEntry::TYPE_WARNING);
+#define LOG_WARNING(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_WARNING);
 
 using std::cout;
 using std::string;
@@ -60,84 +64,42 @@ using std::vector;
 using std::list;
 using std::min;
 using std::pair;
-using std::make_pair;
 using std::max;
+using std::map;
 using boost::shared_ptr;
-using boost::optional;
-using boost::dynamic_pointer_cast;
+using boost::is_any_of;
+using boost::split;
 using dcp::Size;
 
-FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log)
-       : VideoDecoder (c)
-       , AudioDecoder (c)
+FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log, bool fast)
+       : VideoDecoder (c->video, log)
+       , AudioDecoder (c, fast)
        , SubtitleDecoder (c)
        , FFmpeg (c)
        , _log (log)
+       , _pts_offset (pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->video->video_frame_rate()))
 {
-       /* Audio and video frame PTS values may not start with 0.  We want
-          to fiddle them so that:
 
-          1.  One of them starts at time 0.
-          2.  The first video PTS value ends up on a frame boundary.
-
-          Then we remove big initial gaps in PTS and we allow our
-          insertion of black frames to work.
-
-          We will do:
-            audio_pts_to_use = audio_pts_from_ffmpeg + pts_offset;
-            video_pts_to_use = video_pts_from_ffmpeg + pts_offset;
-       */
-
-       /* First, make one of them start at 0 */
-
-       vector<shared_ptr<FFmpegAudioStream> > streams = c->ffmpeg_audio_streams ();
-
-       _pts_offset = ContentTime::min ();
-
-       if (c->first_video ()) {
-               _pts_offset = - c->first_video().get ();
-       }
-
-       BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, streams) {
-               if (i->first_audio) {
-                       _pts_offset = max (_pts_offset, - i->first_audio.get ());
-               }
-       }
-
-       /* If _pts_offset is positive we would be pushing things from a -ve PTS to be played.
-          I don't think we ever want to do that, as it seems things at -ve PTS are not meant
-          to be seen (use for alignment bars etc.); see mantis #418.
-       */
-       if (_pts_offset > ContentTime ()) {
-               _pts_offset = ContentTime ();
-       }
-
-       /* Now adjust so that the video pts starts on a frame */
-       if (c->first_video ()) {
-               ContentTime first_video = c->first_video().get() + _pts_offset;
-               ContentTime const old_first_video = first_video;
-               _pts_offset += first_video.round_up (c->video_frame_rate ()) - old_first_video;
-       }
 }
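
The PTS-offset calculation that used to live in this constructor has moved to a pts_offset() helper, called from the initialiser list above. A minimal sketch of that logic, reconstructed from the deleted comments (the helper's exact signature and location are assumptions):

    ContentTime
    pts_offset (vector<shared_ptr<FFmpegAudioStream> > streams, optional<ContentTime> first_video, double video_frame_rate)
    {
        /* Make the first video or audio PTS map to time 0 */
        ContentTime po = ContentTime::min ();
        if (first_video) {
            po = - first_video.get ();
        }
        BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, streams) {
            if (i->first_audio) {
                po = max (po, - i->first_audio.get ());
            }
        }

        /* Never pull content at negative PTS forward to be played; see mantis #418 */
        if (po > ContentTime ()) {
            po = ContentTime ();
        }

        /* Round the video start up to a frame boundary */
        if (first_video) {
            ContentTime const fv = first_video.get () + po;
            po += fv.round_up (video_frame_rate) - fv;
        }

        return po;
    }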
 
 void
 FFmpegDecoder::flush ()
 {
        /* Get any remaining frames */
-       
+
        _packet.data = 0;
        _packet.size = 0;
-       
+
        /* XXX: should we reset _packet.data and size after each *_decode_* call? */
-       
+
        while (decode_video_packet ()) {}
-       
+
        decode_audio_packet ();
        AudioDecoder::flush ();
 }
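
Zeroing `data` and `size` is the old-API convention for putting a codec into draining mode: each further decode call with the empty packet can still return a frame the codec had buffered. Schematically, for one codec (assuming the pre-avcodec_send_packet API used throughout this file):

    AVPacket drain;
    av_init_packet (&drain);
    drain.data = 0;
    drain.size = 0;

    int finished = 0;
    do {
        /* Each call may emit one more buffered frame until finished == 0 */
        avcodec_decode_video2 (context, frame, &finished, &drain);
    } while (finished);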
 
 bool
-FFmpegDecoder::pass ()
+FFmpegDecoder::pass (PassReason reason, bool accurate)
 {
        int r = av_read_frame (_format_context, &_packet);
 
@@ -152,7 +114,7 @@ FFmpegDecoder::pass ()
                        av_strerror (r, buf, sizeof(buf));
                        LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), buf, r);
                }
-               
+
                flush ();
                return true;
        }
@@ -160,15 +122,15 @@ FFmpegDecoder::pass ()
        int const si = _packet.stream_index;
        shared_ptr<const FFmpegContent> fc = _ffmpeg_content;
 
-       if (si == _video_stream && !_ignore_video) {
+       if (si == _video_stream && !_ignore_video && (accurate || reason != PASS_REASON_SUBTITLE)) {
                decode_video_packet ();
        } else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index (_format_context, si)) {
                decode_subtitle_packet ();
-       } else {
+       } else if (accurate || reason != PASS_REASON_SUBTITLE) {
                decode_audio_packet ();
        }
 
-       av_free_packet (&_packet);
+       av_packet_unref (&_packet);
        return false;
 }
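
The new `reason`/`accurate` pair lets a caller that only wants subtitles skip the much more expensive video and audio decoding unless frame-accurate positioning is required. A hypothetical driving loop (the enum value is taken from the check above; the surrounding API is assumed):

    /* Scan for subtitles without paying for video/audio decoding.
       pass() returns true once the file is exhausted and flushed.
    */
    while (!decoder->pass (FFmpegDecoder::PASS_REASON_SUBTITLE, false)) {
        /* Subtitle packets are decoded; video and audio packets are
           skipped because accurate == false.
        */
    }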
 
@@ -176,10 +138,14 @@ FFmpegDecoder::pass ()
  *  Only the first buffer will be used for non-planar data, otherwise there will be one per channel.
  */
 shared_ptr<AudioBuffers>
-FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream, uint8_t** data, int size)
+FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
 {
        DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));
 
+       int const size = av_samples_get_buffer_size (
+               0, stream->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (stream), 1
+               );
+
        /* Deinterleave and convert to float */
 
        /* total_samples and frames will be rounded down here, so if there are stray samples at the end
@@ -192,7 +158,7 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream, uint8_t
        switch (audio_sample_format (stream)) {
        case AV_SAMPLE_FMT_U8:
        {
-               uint8_t* p = reinterpret_cast<uint8_t *> (data[0]);
+               uint8_t* p = reinterpret_cast<uint8_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
@@ -206,10 +172,10 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream, uint8_t
                }
        }
        break;
-       
+
        case AV_SAMPLE_FMT_S16:
        {
-               int16_t* p = reinterpret_cast<int16_t *> (data[0]);
+               int16_t* p = reinterpret_cast<int16_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
@@ -226,7 +192,7 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream, uint8_t
 
        case AV_SAMPLE_FMT_S16P:
        {
-               int16_t** p = reinterpret_cast<int16_t **> (data);
+               int16_t** p = reinterpret_cast<int16_t **> (_frame->data);
                for (int i = 0; i < stream->channels(); ++i) {
                        for (int j = 0; j < frames; ++j) {
                                audio->data(i)[j] = static_cast<float>(p[i][j]) / (1 << 15);
@@ -234,10 +200,10 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream, uint8_t
                }
        }
        break;
-       
+
        case AV_SAMPLE_FMT_S32:
        {
-               int32_t* p = reinterpret_cast<int32_t *> (data[0]);
+               int32_t* p = reinterpret_cast<int32_t *> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
@@ -252,9 +218,20 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream, uint8_t
        }
        break;
 
+       case AV_SAMPLE_FMT_S32P:
+       {
+               int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
+               for (int i = 0; i < stream->channels(); ++i) {
+                       for (int j = 0; j < frames; ++j) {
+                               audio->data(i)[j] = static_cast<float>(p[i][j]) / 2147483648.0f;
+                       }
+               }
+       }
+       break;
+
        case AV_SAMPLE_FMT_FLT:
        {
-               float* p = reinterpret_cast<float*> (data[0]);
+               float* p = reinterpret_cast<float*> (_frame->data[0]);
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
@@ -268,13 +245,17 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream, uint8_t
                }
        }
        break;
-               
+
        case AV_SAMPLE_FMT_FLTP:
        {
-               float** p = reinterpret_cast<float**> (data);
-               for (int i = 0; i < stream->channels(); ++i) {
+               float** p = reinterpret_cast<float**> (_frame->data);
+               /* Sometimes there aren't as many channels in the _frame as in the stream */
+               for (int i = 0; i < _frame->channels; ++i) {
                        memcpy (audio->data(i), p[i], frames * sizeof(float));
                }
+               for (int i = _frame->channels; i < stream->channels(); ++i) {
+                       audio->make_silent (i);
+               }
        }
        break;
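
Every case in this switch is the same pattern with a different sample type: walk the source buffer, convert each sample to a float in [-1, 1) and distribute it into per-channel output buffers. A standalone sketch of the interleaved 16-bit case:

    #include <stdint.h>

    /* Interleaved int16 -> planar float, scaled into [-1, 1) */
    void
    deinterleave_s16 (int16_t const * in, float** out, int frames, int channels)
    {
        for (int i = 0; i < frames; ++i) {
            for (int c = 0; c < channels; ++c) {
                out[c][i] = static_cast<float> (*in++) / (1 << 15);
            }
        }
    }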
 
@@ -314,14 +295,17 @@ FFmpegDecoder::seek (ContentTime time, bool accurate)
        /* XXX: it seems debatable whether PTS should be used here...
           http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
        */
-       
-       ContentTime const u = time - _pts_offset;
-       av_seek_frame (_format_context, _video_stream, u.seconds() / av_q2d (_format_context->streams[_video_stream]->time_base), 0);
+
+       ContentTime u = time - _pts_offset;
+       if (u < ContentTime ()) {
+               u = ContentTime ();
+       }
+       av_seek_frame (_format_context, _video_stream, u.seconds() / av_q2d (_format_context->streams[_video_stream]->time_base), AVSEEK_FLAG_BACKWARD);
 
        avcodec_flush_buffers (video_codec_context());
 
        /* XXX: should be flushing audio buffers? */
-       
+
        if (subtitle_codec_context ()) {
                avcodec_flush_buffers (subtitle_codec_context ());
        }
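
The seek target is converted from seconds into the video stream's time base, and AVSEEK_FLAG_BACKWARD asks for the nearest seek point at or before the target so decoding can run forward from a keyframe. The same conversion, spelled out with an intermediate variable:

    /* av_q2d (time_base) is seconds-per-tick, so dividing seconds by it
       gives a timestamp in stream ticks.
    */
    AVStream* vs = _format_context->streams[_video_stream];
    int64_t const ts = llrint (u.seconds() / av_q2d (vs->time_base));
    av_seek_frame (_format_context, _video_stream, ts, AVSEEK_FLAG_BACKWARD);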
@@ -333,7 +317,7 @@ FFmpegDecoder::decode_audio_packet ()
        /* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4
           several times.
        */
-       
+
        AVPacket copy_packet = _packet;
 
        /* XXX: inefficient */
@@ -347,7 +331,7 @@ FFmpegDecoder::decode_audio_packet ()
                /* The packet's stream may not be an audio one; just ignore it in this method if so */
                return;
        }
-       
+
        while (copy_packet.size > 0) {
 
                int frame_finished;
@@ -369,18 +353,26 @@ FFmpegDecoder::decode_audio_packet ()
                }
 
                if (frame_finished) {
-                       ContentTime const ct = ContentTime::from_seconds (
+                       ContentTime ct = ContentTime::from_seconds (
                                av_frame_get_best_effort_timestamp (_frame) *
                                av_q2d ((*stream)->stream (_format_context)->time_base))
                                + _pts_offset;
-                       
-                       int const data_size = av_samples_get_buffer_size (
-                               0, (*stream)->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (*stream), 1
-                               );
 
-                       audio (*stream, deinterleave_audio (*stream, _frame->data, data_size), ct);
+                       shared_ptr<AudioBuffers> data = deinterleave_audio (*stream);
+
+                       if (ct < ContentTime ()) {
+                               /* Discard audio data that comes before time 0 */
+                               Frame const remove = min (int64_t (data->frames()), (-ct).frames_ceil(double((*stream)->frame_rate ())));
+                               data->move (remove, 0, data->frames() - remove);
+                               data->set_frames (data->frames() - remove);
+                               ct += ContentTime::from_frames (remove, (*stream)->frame_rate ());
+                       }
+
+                       if (data->frames() > 0) {
+                               audio (*stream, data, ct);
+                       }
                }
-                       
+
                copy_packet.data += decode_result;
                copy_packet.size -= decode_result;
        }
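
`data->move (remove, 0, ...)` shifts the kept samples to the front of the buffer. With plain arrays standing in for AudioBuffers (a sketch, not the real implementation), the trim amounts to:

    #include <cstring>

    /* Drop `remove` frames from the start of each channel, keeping the rest */
    void
    trim_front (float** data, int channels, int& frames, int remove)
    {
        for (int c = 0; c < channels; ++c) {
            std::memmove (data[c], data[c] + remove, (frames - remove) * sizeof (float));
        }
        frames -= remove;
    }

For instance, with a 48kHz stream and ct == -0.5s, `remove` is 24000 frames, so a 26000-frame buffer keeps its last 2000 frames and ct advances to exactly 0.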
@@ -396,15 +388,16 @@ FFmpegDecoder::decode_video_packet ()
 
        boost::mutex::scoped_lock lm (_filter_graphs_mutex);
 
-       shared_ptr<FilterGraph> graph;
-       
-       list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
+       shared_ptr<VideoFilterGraph> graph;
+
+       list<shared_ptr<VideoFilterGraph> >::iterator i = _filter_graphs.begin();
        while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
                ++i;
        }
 
        if (i == _filter_graphs.end ()) {
-               graph.reset (new FilterGraph (_ffmpeg_content, dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
+               graph.reset (new VideoFilterGraph (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
+               graph->setup (_ffmpeg_content->filters ());
                _filter_graphs.push_back (graph);
                LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
        } else {
@@ -416,12 +409,12 @@ FFmpegDecoder::decode_video_packet ()
        for (list<pair<shared_ptr<Image>, int64_t> >::iterator i = images.begin(); i != images.end(); ++i) {
 
                shared_ptr<Image> image = i->first;
-               
+
                if (i->second != AV_NOPTS_VALUE) {
                        double const pts = i->second * av_q2d (_format_context->streams[_video_stream]->time_base) + _pts_offset.seconds ();
                        video (
                                shared_ptr<ImageProxy> (new RawImageProxy (image)),
-                               rint (pts * _ffmpeg_content->video_frame_rate ())
+                               llrint (pts * _ffmpeg_content->video->video_frame_rate ())
                                );
                } else {
                        LOG_WARNING_NC ("Dropping frame without PTS");
@@ -430,7 +423,7 @@ FFmpegDecoder::decode_video_packet ()
 
        return true;
 }
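
The list search in decode_video_packet() implements a small cache: one VideoFilterGraph per (frame size, pixel format) combination seen so far, built lazily because either can change mid-stream. Factored into a hypothetical helper (graph_for() does not exist in the real class), the lookup is:

    shared_ptr<VideoFilterGraph>
    FFmpegDecoder::graph_for (dcp::Size size, AVPixelFormat format)
    {
        BOOST_FOREACH (shared_ptr<VideoFilterGraph> i, _filter_graphs) {
            if (i->can_process (size, format)) {
                return i;
            }
        }

        /* Nothing suitable cached; build, configure and remember a new graph */
        shared_ptr<VideoFilterGraph> graph (new VideoFilterGraph (size, format));
        graph->setup (_ffmpeg_content->filters ());
        _filter_graphs.push_back (graph);
        return graph;
    }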
-       
+
 void
 FFmpegDecoder::decode_subtitle_packet ()
 {
@@ -439,14 +432,12 @@ FFmpegDecoder::decode_subtitle_packet ()
        if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, &_packet) < 0 || !got_subtitle) {
                return;
        }
-       
+
        if (sub.num_rects <= 0) {
                /* Sometimes we get an empty AVSubtitle, which is used by some codecs to
                   indicate that the previous subtitle should stop.  We can ignore it here.
                */
                return;
-       } else if (sub.num_rects > 1) {
-               throw DecodeError (_("multi-part subtitles not yet supported"));
        }
 
        /* Subtitle PTS (within the source, not taking into account any of the
@@ -460,38 +451,40 @@ FFmpegDecoder::decode_subtitle_packet ()
                period.to = sub_period.to.get() + _pts_offset;
        } else {
                /* We have to look up the `to' time in the stream's records */
-               period.to = ffmpeg_content()->subtitle_stream()->find_subtitle_to (sub_period.from);
+               period.to = ffmpeg_content()->subtitle_stream()->find_subtitle_to (subtitle_id (sub));
        }
-       
-       AVSubtitleRect const * rect = sub.rects[0];
-
-       switch (rect->type) {
-       case SUBTITLE_NONE:
-               break;
-       case SUBTITLE_BITMAP:
-               decode_bitmap_subtitle (rect, period);
-               break;
-       case SUBTITLE_TEXT:
-               cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
-               break;
-       case SUBTITLE_ASS:
-               cout << "XXX: SUBTITLE_ASS " << rect->ass << "\n";
-               break;
+
+       for (unsigned int i = 0; i < sub.num_rects; ++i) {
+               AVSubtitleRect const * rect = sub.rects[i];
+
+               switch (rect->type) {
+               case SUBTITLE_NONE:
+                       break;
+               case SUBTITLE_BITMAP:
+                       decode_bitmap_subtitle (rect, period);
+                       break;
+               case SUBTITLE_TEXT:
+                       cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
+                       break;
+               case SUBTITLE_ASS:
+                       decode_ass_subtitle (rect->ass, period);
+                       break;
+               }
        }
-       
+
        avsubtitle_free (&sub);
 }
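
When a packet carries no end time, find_subtitle_to() falls back on times gathered when the content was examined, now keyed by subtitle_id(sub) rather than the `from' time (presumably a digest of the packet, hence the new md5_digester include). A guess at the shape of that lookup, purely illustrative:

    /* Illustrative only: assumed shape of FFmpegSubtitleStream's lookup */
    map<string, ContentTime> _subtitle_to;

    ContentTime
    find_subtitle_to (string id) const
    {
        map<string, ContentTime>::const_iterator i = _subtitle_to.find (id);
        DCPOMATIC_ASSERT (i != _subtitle_to.end ());
        return i->second;
    }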
 
 list<ContentTimePeriod>
 FFmpegDecoder::image_subtitles_during (ContentTimePeriod p, bool starting) const
 {
-       return _ffmpeg_content->subtitles_during (p, starting);
+       return _ffmpeg_content->image_subtitles_during (p, starting);
 }
 
 list<ContentTimePeriod>
-FFmpegDecoder::text_subtitles_during (ContentTimePeriod, bool) const
+FFmpegDecoder::text_subtitles_during (ContentTimePeriod p, bool starting) const
 {
-       return list<ContentTimePeriod> ();
+       return _ffmpeg_content->text_subtitles_during (p, starting);
 }
 
 void
@@ -500,36 +493,135 @@ FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTimeP
        /* Note RGBA is expressed little-endian, so the first byte in the word is R, second
           G, third B, fourth A.
        */
-       shared_ptr<Image> image (new Image (PIX_FMT_RGBA, dcp::Size (rect->w, rect->h), true));
-       
+       shared_ptr<Image> image (new Image (AV_PIX_FMT_RGBA, dcp::Size (rect->w, rect->h), true));
+
+#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
        /* Start of the first line in the subtitle */
        uint8_t* sub_p = rect->pict.data[0];
        /* sub_p looks up into a BGRA palette which is here
           (i.e. first byte B, second G, third R, fourth A)
        */
        uint32_t const * palette = (uint32_t *) rect->pict.data[1];
+#else
+       /* Start of the first line in the subtitle */
+       uint8_t* sub_p = rect->data[0];
+       /* sub_p looks up into a BGRA palette which is here
+          (i.e. first byte B, second G, third R, fourth A)
+       */
+       uint32_t const * palette = (uint32_t *) rect->data[1];
+#endif
+       /* And the stream has a map of those palette colours to colours
+          chosen by the user; create a `mapped' palette from those settings.
+       */
+       map<RGBA, RGBA> colour_map = ffmpeg_content()->subtitle_stream()->colours ();
+       vector<RGBA> mapped_palette (rect->nb_colors);
+       for (int i = 0; i < rect->nb_colors; ++i) {
+               RGBA c ((palette[i] & 0xff0000) >> 16, (palette[i] & 0xff00) >> 8, palette[i] & 0xff, (palette[i] & 0xff000000) >> 24);
+               map<RGBA, RGBA>::const_iterator j = colour_map.find (c);
+               if (j != colour_map.end ()) {
+                       mapped_palette[i] = j->second;
+               } else {
+                       /* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
+                          it is from a project that was created before this stuff was added.  Just use the
+                          colour straight from the original palette.
+                       */
+                       mapped_palette[i] = c;
+               }
+       }
+
        /* Start of the output data */
        uint32_t* out_p = (uint32_t *) image->data()[0];
-       
+
        for (int y = 0; y < rect->h; ++y) {
                uint8_t* sub_line_p = sub_p;
                uint32_t* out_line_p = out_p;
                for (int x = 0; x < rect->w; ++x) {
-                       uint32_t const p = palette[*sub_line_p++];
-                       *out_line_p++ = ((p & 0xff) << 16) | (p & 0xff00) | ((p & 0xff0000) >> 16) | (p & 0xff000000);
+                       RGBA const p = mapped_palette[*sub_line_p++];
+                       /* XXX: this seems to be wrong to me (isn't the output image RGBA?) but it looks right on screen */
+                       *out_line_p++ = (p.a << 24) | (p.r << 16) | (p.g << 8) | p.b;
                }
+#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
                sub_p += rect->pict.linesize[0];
+#else
+               sub_p += rect->linesize[0];
+#endif
                out_p += image->stride()[0] / sizeof (uint32_t);
        }
-       
-       dcp::Size const vs = _ffmpeg_content->video_size ();
+
+       dcp::Size const vs = _ffmpeg_content->video->video_size ();
        dcpomatic::Rect<double> const scaled_rect (
                static_cast<double> (rect->x) / vs.width,
                static_cast<double> (rect->y) / vs.height,
                static_cast<double> (rect->w) / vs.width,
                static_cast<double> (rect->h) / vs.height
                );
-       
+
        image_subtitle (period, image, scaled_rect);
 }
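
To make the bit-twiddling concrete: the palette word is read as A, R, G, B from most to least significant byte, and the repack writes the same layout back, so on a little-endian machine the bytes land in memory as B, G, R, A; that is the likely explanation for the XXX comment above. A standalone illustration:

    #include <stdint.h>
    #include <cstdio>

    int main ()
    {
        /* One palette word with known channels: A=0x80, R=0xff, G=0x40, B=0x20 */
        uint32_t const p = 0x80ff4020;
        uint8_t const r = (p & 0xff0000) >> 16;
        uint8_t const g = (p & 0xff00) >> 8;
        uint8_t const b = p & 0xff;
        uint8_t const a = (p & 0xff000000) >> 24;
        /* Repacked as in decode_bitmap_subtitle(); reproduces p exactly */
        uint32_t const out = (uint32_t (a) << 24) | (r << 16) | (g << 8) | b;
        std::printf ("%08x\n", out); /* prints 80ff4020 */
        return 0;
    }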
 
+void
+FFmpegDecoder::decode_ass_subtitle (string ass, ContentTimePeriod period)
+{
+       /* We have no styles and no Format: line, so I'm assuming that FFmpeg
+          produces a single format of Dialogue: lines...
+       */
+
+       vector<string> bits;
+       split (bits, ass, is_any_of (","));
+       if (bits.size() < 10) {
+               return;
+       }
+
+       sub::RawSubtitle base;
+       list<sub::RawSubtitle> raw = sub::SSAReader::parse_line (base, bits[9]);
+       list<sub::Subtitle> subs = sub::collect<list<sub::Subtitle> > (raw);
+
+       /* XXX: lots of this is copied from TextSubtitle; there should probably be some sharing */
+
+       /* Highest line index in this subtitle */
+       int highest = 0;
+       BOOST_FOREACH (sub::Subtitle i, subs) {
+               BOOST_FOREACH (sub::Line j, i.lines) {
+                       DCPOMATIC_ASSERT (j.vertical_position.reference && j.vertical_position.reference.get() == sub::TOP_OF_SUBTITLE);
+                       DCPOMATIC_ASSERT (j.vertical_position.line);
+                       highest = max (highest, j.vertical_position.line.get());
+               }
+       }
+
+       list<dcp::SubtitleString> ss;
+
+       BOOST_FOREACH (sub::Subtitle i, subs) {
+               BOOST_FOREACH (sub::Line j, i.lines) {
+                       BOOST_FOREACH (sub::Block k, j.blocks) {
+                               ss.push_back (
+                                       dcp::SubtitleString (
+                                               boost::optional<string> (),
+                                               k.italic,
+                                               k.bold,
+                                               dcp::Colour (255, 255, 255),
+                                               /* 48pt is 1/22nd of the screen height */
+                                               48,
+                                               1,
+                                               dcp::Time (i.from.seconds(), 1000),
+                                               dcp::Time (i.to.seconds(), 1000),
+                                               0,
+                                               dcp::HALIGN_CENTER,
+                                               /* This 1.015 is an arbitrary value to lift the bottom sub off the bottom
+                                                  of the screen a bit to a pleasing degree.
+                                               */
+                                               1.015 - ((1 + highest - j.vertical_position.line.get()) * 1.5 / 22),
+                                               dcp::VALIGN_TOP,
+                                               dcp::DIRECTION_LTR,
+                                               k.text,
+                                               static_cast<dcp::Effect> (0),
+                                               dcp::Colour (255, 255, 255),
+                                               dcp::Time (),
+                                               dcp::Time ()
+                                               )
+                                       );
+                       }
+               }
+       }
+
+       text_subtitle (period, ss);
+}
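
On the positioning arithmetic: at 48pt (1/22nd of the screen height) with 1.5 line spacing, each line occupies 1.5/22 of the height, and the 1.015 constant lifts the block slightly off the bottom edge. A worked example for a two-line subtitle (highest == 1):

    /* Top-aligned vertical position as a fraction of screen height */
    double
    position (int line, int highest)
    {
        return 1.015 - ((1 + highest - line) * 1.5 / 22);
    }

    /* position (0, 1) == 0.8786...  upper line
       position (1, 1) == 0.9468...  lower line, just clear of the bottom
    */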