list<ContentTimePeriod>
-DCPDecoder::subtitles_during (ContentTimePeriod, bool) const
+DCPDecoder::image_subtitles_during (ContentTimePeriod, bool) const
+{
+ return list<ContentTimePeriod> ();
+}
+
+list<ContentTimePeriod>
+DCPDecoder::text_subtitles_during (ContentTimePeriod, bool) const
{
/* XXX */
return list<ContentTimePeriod> ();
private:
void seek (ContentTime t, bool accurate);
bool pass ();
- std::list<ContentTimePeriod> subtitles_during (ContentTimePeriod, bool starting) const;
+ std::list<ContentTimePeriod> image_subtitles_during (ContentTimePeriod, bool starting) const;
+ std::list<ContentTimePeriod> text_subtitles_during (ContentTimePeriod, bool starting) const;
ContentTime _next;
std::list<boost::shared_ptr<dcp::Reel> > _reels;
}
list<ContentTimePeriod>
-DCPSubtitleDecoder::subtitles_during (ContentTimePeriod p, bool starting) const
+DCPSubtitleDecoder::image_subtitles_during (ContentTimePeriod, bool) const
+{
+ return list<ContentTimePeriod> ();
+}
+
+list<ContentTimePeriod>
+DCPSubtitleDecoder::text_subtitles_during (ContentTimePeriod p, bool starting) const
{
/* XXX: inefficient */
bool pass ();
private:
- std::list<ContentTimePeriod> subtitles_during (ContentTimePeriod, bool starting) const;
+ std::list<ContentTimePeriod> image_subtitles_during (ContentTimePeriod, bool starting) const;
+ std::list<ContentTimePeriod> text_subtitles_during (ContentTimePeriod, bool starting) const;
std::list<dcp::SubtitleString> _subtitles;
std::list<dcp::SubtitleString>::const_iterator _next;
{
public:
ContentTimePeriod () {}
+
ContentTimePeriod (ContentTime f, ContentTime t)
: from (f)
, to (t)
{
VideoDecoder::seek (time, accurate);
AudioDecoder::seek (time, accurate);
+ SubtitleDecoder::seek (time, accurate);
/* If we are doing an `accurate' seek, we need to use pre-roll, as
we don't really know what the seek will give us.
return;
}
- /* Sometimes we get an empty AVSubtitle, which is used by some codecs to
- indicate that the previous subtitle should stop.
+ /* Subtitle PTS (within the source, not taking into account any of the
+ source that we may have chopped off for the DCP)
*/
+ FFmpegSubtitlePeriod period = subtitle_period (sub);
+ period.from += _pts_offset;
+ if (period.to) {
+ period.to = period.to.get() + _pts_offset;
+ }
+
if (sub.num_rects <= 0) {
- image_subtitle (ContentTimePeriod (), shared_ptr<Image> (), dcpomatic::Rect<double> ());
+ /* Sometimes we get an empty AVSubtitle, which is used by some codecs to
+ indicate that the previous subtitle should stop. Emit the pending one.
+ */
+ if (_pending_subtitle_from && _pending_subtitle_image && _pending_subtitle_rect) {
+ image_subtitle (
+ ContentTimePeriod (_pending_subtitle_from.get(), period.from),
+ _pending_subtitle_image,
+ _pending_subtitle_rect.get ()
+ );
+ _pending_subtitle_from = optional<ContentTime> ();
+ _pending_subtitle_image.reset ();
+ _pending_subtitle_rect = optional<dcpomatic::Rect<double> > ();
+ }
return;
} else if (sub.num_rects > 1) {
throw DecodeError (_("multi-part subtitles not yet supported"));
}
- /* Subtitle PTS (within the source, not taking into account any of the
- source that we may have chopped off for the DCP)
- */
- ContentTimePeriod period = subtitle_period (sub) + _pts_offset;
-
AVSubtitleRect const * rect = sub.rects[0];
if (rect->type != SUBTITLE_BITMAP) {
- /* XXX */
- // throw DecodeError (_("non-bitmap subtitles not yet supported"));
- return;
+ throw DecodeError (_("non-bitmap subtitles not yet supported"));
}
/* Note RGBA is expressed little-endian, so the first byte in the word is R, second
}
dcp::Size const vs = _ffmpeg_content->video_size ();
-
- image_subtitle (
- period,
- image,
- dcpomatic::Rect<double> (
- static_cast<double> (rect->x) / vs.width,
- static_cast<double> (rect->y) / vs.height,
- static_cast<double> (rect->w) / vs.width,
- static_cast<double> (rect->h) / vs.height
- )
+ dcpomatic::Rect<double> const scaled_rect (
+ static_cast<double> (rect->x) / vs.width,
+ static_cast<double> (rect->y) / vs.height,
+ static_cast<double> (rect->w) / vs.width,
+ static_cast<double> (rect->h) / vs.height
);
+
+ if (period.to) {
+ image_subtitle (ContentTimePeriod (period.from, period.to.get()), image, scaled_rect);
+ } else {
+ /* We don't know when this subtitle stops, so store it until we find out */
+ _pending_subtitle_from = period.from;
+ _pending_subtitle_image = image;
+ _pending_subtitle_rect = scaled_rect;
+ }
avsubtitle_free (&sub);
}
list<ContentTimePeriod>
-FFmpegDecoder::subtitles_during (ContentTimePeriod p, bool starting) const
+FFmpegDecoder::image_subtitles_during (ContentTimePeriod p, bool starting) const
{
return _ffmpeg_content->subtitles_during (p, starting);
}
+
+list<ContentTimePeriod>
+FFmpegDecoder::text_subtitles_during (ContentTimePeriod, bool) const
+{
+ return list<ContentTimePeriod> ();
+}
#include "audio_decoder.h"
#include "subtitle_decoder.h"
#include "ffmpeg.h"
+#include "rect.h"
extern "C" {
#include <libavcodec/avcodec.h>
}
void maybe_add_subtitle ();
boost::shared_ptr<AudioBuffers> deinterleave_audio (uint8_t** data, int size);
- std::list<ContentTimePeriod> subtitles_during (ContentTimePeriod, bool starting) const;
+ boost::optional<ContentTime> _pending_subtitle_from;
+ boost::shared_ptr<Image> _pending_subtitle_image;
+ boost::optional<dcpomatic::Rect<double> > _pending_subtitle_rect;
+
+ std::list<ContentTimePeriod> image_subtitles_during (ContentTimePeriod, bool starting) const;
+ std::list<ContentTimePeriod> text_subtitles_during (ContentTimePeriod, bool starting) const;
boost::shared_ptr<Log> _log;
int frame_finished;
AVSubtitle sub;
if (avcodec_decode_subtitle2 (context, &sub, &frame_finished, &_packet) >= 0 && frame_finished) {
- ContentTimePeriod const period = subtitle_period (sub);
- if (sub.num_rects == 0 && !stream->periods.empty () && stream->periods.back().to > period.from) {
- /* Finish the last subtitle */
- stream->periods.back().to = period.from;
+ FFmpegSubtitlePeriod const period = subtitle_period (sub);
+ if (sub.num_rects <= 0 && _last_subtitle_start) {
+ stream->periods.push_back (ContentTimePeriod (_last_subtitle_start.get (), period.from));
+ _last_subtitle_start = optional<ContentTime> ();
} else if (sub.num_rects == 1) {
- stream->periods.push_back (period);
+ if (period.to) {
+ stream->periods.push_back (ContentTimePeriod (period.from, period.to.get ()));
+ } else {
+ _last_subtitle_start = period.from;
+ }
}
+ avsubtitle_free (&sub);
}
}
std::string audio_stream_name (AVStream* s) const;
std::string subtitle_stream_name (AVStream* s) const;
boost::optional<ContentTime> frame_time (AVStream* s) const;
-
+
std::vector<boost::shared_ptr<FFmpegSubtitleStream> > _subtitle_streams;
std::vector<boost::shared_ptr<FFmpegAudioStream> > _audio_streams;
boost::optional<ContentTime> _first_video;
*/
ContentTime _video_length;
bool _need_video_length;
+
+ boost::optional<ContentTime> _last_subtitle_start;
};
*/
#include "ffmpeg_subtitle_stream.h"
+#include "raw_convert.h"
+#include <libxml++/libxml++.h>
+#include <boost/foreach.hpp>
+
+using std::string;
/** Construct a SubtitleStream from a value returned from to_string().
* @param t String returned from to_string().
FFmpegSubtitleStream::FFmpegSubtitleStream (cxml::ConstNodePtr node)
	: FFmpegStream (node)
{
-
+	/* Restore the periods saved by as_xml(): one <Period> child per
+	   subtitle, each carrying its own <From>/<To> times.  Read From/To
+	   from the Period element `i', not from the parent stream node.
+	*/
+	BOOST_FOREACH (cxml::NodePtr i, node->node_children ("Period")) {
+		periods.push_back (
+			ContentTimePeriod (
+				ContentTime (i->number_child<ContentTime::Type> ("From")),
+				ContentTime (i->number_child<ContentTime::Type> ("To"))
+			)
+		);
+	}
}
void
FFmpegSubtitleStream::as_xml (xmlpp::Node* root) const
{
FFmpegStream::as_xml (root);
+
+ BOOST_FOREACH (ContentTimePeriod const & i, periods) {
+ xmlpp::Node* node = root->add_child ("Period");
+ node->add_child("From")->add_child_text (raw_convert<string> (i.from.get ()));
+ node->add_child("To")->add_child_text (raw_convert<string> (i.to.get ()));
+ }
}
list<PositionImage> c = transform_image_subtitles (ps.image);
copy (c.begin(), c.end(), back_inserter (sub_images));
- /* Text subtitles (rendered to images) */
- sub_images.push_back (render_subtitles (ps.text, _video_container_size));
-
+ /* Text subtitles (rendered to an image) */
+ if (!ps.text.empty ()) {
+ sub_images.push_back (render_subtitles (ps.text, _video_container_size));
+ }
+
if (!sub_images.empty ()) {
for (list<shared_ptr<PlayerVideo> >::const_iterator i = pvf.begin(); i != pvf.end(); ++i) {
(*i)->set_subtitle (merge (sub_images));
PositionImage
render_subtitles (list<dcp::SubtitleString> subtitles, dcp::Size target)
{
- if (subtitles.empty ()) {
- return PositionImage ();
- }
-
/* Estimate height that the subtitle image needs to be */
optional<int> top;
optional<int> bottom;
}
list<ContentTimePeriod>
-SubRipDecoder::subtitles_during (ContentTimePeriod p, bool starting) const
+SubRipDecoder::image_subtitles_during (ContentTimePeriod, bool) const
+{
+ return list<ContentTimePeriod> ();
+}
+
+list<ContentTimePeriod>
+SubRipDecoder::text_subtitles_during (ContentTimePeriod p, bool starting) const
{
/* XXX: inefficient */
bool pass ();
private:
- std::list<ContentTimePeriod> subtitles_during (ContentTimePeriod, bool starting) const;
+ std::list<ContentTimePeriod> image_subtitles_during (ContentTimePeriod, bool starting) const;
+ std::list<ContentTimePeriod> text_subtitles_during (ContentTimePeriod, bool starting) const;
size_t _next;
};
/*
- Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2015 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
}
/** Called by subclasses when an image subtitle is ready.
- * Image may be 0 to say that there is no current subtitle.
+ * @param period Period of the subtitle.
+ * @param image Subtitle image.
* @param rect Area expressed as a fraction of the video frame that this subtitle
* is for (e.g. a width of 0.5 means the width of the subtitle is half the width
* of the video frame)
_decoded_text_subtitles.push_back (ContentTextSubtitle (s));
}
+/** @param sp Full periods of subtitles that are showing or starting during the specified period */
template <class T>
list<T>
-SubtitleDecoder::get (list<T> const & subs, ContentTimePeriod period, bool starting)
+SubtitleDecoder::get (list<T> const & subs, list<ContentTimePeriod> const & sp, ContentTimePeriod period, bool starting)
{
- /* Get the full periods of the subtitles that are showing or starting during the specified period */
- list<ContentTimePeriod> sp = subtitles_during (period, starting);
if (sp.empty ()) {
/* Nothing in this period */
return list<T> ();
list<ContentTextSubtitle>
SubtitleDecoder::get_text_subtitles (ContentTimePeriod period, bool starting)
{
- return get<ContentTextSubtitle> (_decoded_text_subtitles, period, starting);
+ return get<ContentTextSubtitle> (_decoded_text_subtitles, text_subtitles_during (period, starting), period, starting);
}
list<ContentImageSubtitle>
SubtitleDecoder::get_image_subtitles (ContentTimePeriod period, bool starting)
{
- return get<ContentImageSubtitle> (_decoded_image_subtitles, period, starting);
+ return get<ContentImageSubtitle> (_decoded_image_subtitles, image_subtitles_during (period, starting), period, starting);
}
void
private:
template <class T>
- std::list<T> get (std::list<T> const & subs, ContentTimePeriod period, bool starting);
+ std::list<T> get (std::list<T> const & subs, std::list<ContentTimePeriod> const & sp, ContentTimePeriod period, bool starting);
/** @param starting true if we want only subtitles that start during the period, otherwise
* we want subtitles that overlap the period.
*/
- virtual std::list<ContentTimePeriod> subtitles_during (ContentTimePeriod period, bool starting) const = 0;
+ virtual std::list<ContentTimePeriod> image_subtitles_during (ContentTimePeriod period, bool starting) const = 0;
+ virtual std::list<ContentTimePeriod> text_subtitles_during (ContentTimePeriod period, bool starting) const = 0;
boost::shared_ptr<const SubtitleContent> _subtitle_content;
};
#include "dcpomatic_time.h"
#include "position.h"
+#include "rect.h"
#include <dcp/util.h>
#include <boost/shared_ptr.hpp>
#include <vector>
}
return p;
}
-
-ContentTimePeriod
+
+FFmpegSubtitlePeriod
subtitle_period (AVSubtitle const & sub)
{
ContentTime const packet_time = ContentTime::from_seconds (static_cast<double> (sub.pts) / AV_TIME_BASE);
- ContentTimePeriod period (
+ if (sub.end_display_time == static_cast<uint32_t> (-1)) {
+ /* End time is not known */
+ return FFmpegSubtitlePeriod (packet_time + ContentTime::from_seconds (sub.start_display_time / 1e3));
+ }
+
+ return FFmpegSubtitlePeriod (
packet_time + ContentTime::from_seconds (sub.start_display_time / 1e3),
packet_time + ContentTime::from_seconds (sub.end_display_time / 1e3)
);
-
- return period;
}
map<string, string>
extern int stride_round_up (int, int const *, int);
extern int round_to (float n, int r);
extern void* wrapped_av_malloc (size_t);
-extern ContentTimePeriod subtitle_period (AVSubtitle const &);
+
+class FFmpegSubtitlePeriod
+{
+public:
+ FFmpegSubtitlePeriod (ContentTime f)
+ : from (f)
+ {}
+
+ FFmpegSubtitlePeriod (ContentTime f, ContentTime t)
+ : from (f)
+ , to (t)
+ {}
+
+ ContentTime from;
+ boost::optional<ContentTime> to;
+};
+
+extern FFmpegSubtitlePeriod subtitle_period (AVSubtitle const &);
extern void set_backtrace_file (boost::filesystem::path);
extern dcp::FrameInfo read_frame_info (FILE* file, int frame, Eyes eyes);
extern void write_frame_info (FILE* file, int frame, Eyes eyes, dcp::FrameInfo info);