* Fix exception when analysing audio of projects with more
than 8 DCP channels.
+2016-02-15 Carl Hetherington <cth@carlh.net>
+
+ * Support SSA subtitles embedded within FFmpeg files.
+
2016-02-12 Carl Hetherington <cth@carlh.net>
* Add basic support for SSA (SubStation Alpha) subtitles (#128).
return (('ffmpeg-cdist', '96d67de', ffmpeg_options),
('libdcp', '4e38f82'),
- ('libsub', 'd79b29f'))
+ ('libsub', '9cefa0f'))
def configure_options(target):
opt = ''
return digester.get ();
}
+/** Decide whether an AVSubtitle contains image (bitmap) or text data.
+ *  @param sub Subtitle to examine.
+ *  @return true if sub's rects are bitmaps, false if they are text/ASS
+ *  (mixed image and text in one AVSubtitle is not supported).
+ */
+bool
+FFmpeg::subtitle_is_image (AVSubtitle const & sub)
+{
+ bool image = false;
+ bool text = false;
+
+ for (unsigned int i = 0; i < sub.num_rects; ++i) {
+ switch (sub.rects[i]->type) {
+ case SUBTITLE_BITMAP:
+ image = true;
+ break;
+ case SUBTITLE_TEXT:
+ case SUBTITLE_ASS:
+ text = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* We can't cope with mixed image/text in one AVSubtitle */
+ DCPOMATIC_ASSERT (!image || !text);
+
+ return image;
+}
+
/** Compute the pts offset to use given a set of audio streams and some video details.
* Sometimes these parameters will have just been determined by an Examiner, sometimes
* they will have been retrieved from a piece of Content, hence the need for this method
static FFmpegSubtitlePeriod subtitle_period (AVSubtitle const & sub);
static std::string subtitle_id (AVSubtitle const & sub);
+ static bool subtitle_is_image (AVSubtitle const & sub);
boost::shared_ptr<const FFmpegContent> _ffmpeg_content;
}
+/** @param period Period to look within.
+ *  @param starting true to find image subtitles starting within period, false to find those overlapping it.
+ *  @return Periods of matching image subtitles in our subtitle stream, or empty if there is no stream.
+ */
list<ContentTimePeriod>
-FFmpegContent::subtitles_during (ContentTimePeriod period, bool starting) const
+FFmpegContent::image_subtitles_during (ContentTimePeriod period, bool starting) const
{
shared_ptr<FFmpegSubtitleStream> stream = subtitle_stream ();
if (!stream) {
return list<ContentTimePeriod> ();
}
- return stream->subtitles_during (period, starting);
+ return stream->image_subtitles_during (period, starting);
+}
+
+/** As image_subtitles_during, but for text subtitles */
+list<ContentTimePeriod>
+FFmpegContent::text_subtitles_during (ContentTimePeriod period, bool starting) const
+{
+ shared_ptr<FFmpegSubtitleStream> stream = subtitle_stream ();
+ if (!stream) {
+ return list<ContentTimePeriod> ();
+ }
+
+ return stream->text_subtitles_during (period, starting);
}
+/** @return true if any of this content's subtitle streams contains image subtitles */
bool
-FFmpegContent::has_text_subtitles () const
+FFmpegContent::has_image_subtitles () const
{
+ BOOST_FOREACH (shared_ptr<FFmpegSubtitleStream> i, subtitle_streams()) {
+ if (i->has_image_subtitles()) {
+ return true;
+ }
+ }
+
return false;
}
+/** @return true if any of this content's subtitle streams contains text subtitles */
bool
-FFmpegContent::has_image_subtitles () const
+FFmpegContent::has_text_subtitles () const
{
- return !subtitle_streams().empty ();
+ BOOST_FOREACH (shared_ptr<FFmpegSubtitleStream> i, subtitle_streams()) {
+ if (i->has_text_subtitles()) {
+ return true;
+ }
+ }
+
+ return false;
}
void
return _first_video;
}
- std::list<ContentTimePeriod> subtitles_during (ContentTimePeriod, bool starting) const;
+ std::list<ContentTimePeriod> image_subtitles_during (ContentTimePeriod, bool starting) const;
+ std::list<ContentTimePeriod> text_subtitles_during (ContentTimePeriod, bool starting) const;
protected:
void add_properties (std::list<std::pair<std::string, std::string> > &) const;
#include "film.h"
#include "md5_digester.h"
#include "compose.hpp"
+#include <dcp/subtitle_string.h>
+#include <sub/ssa_reader.h>
+#include <sub/subtitle.h>
+#include <sub/collect.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <boost/foreach.hpp>
+#include <boost/algorithm/string.hpp>
#include <vector>
#include <iomanip>
#include <iostream>
#define LOG_WARNING(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_WARNING);
using std::cout;
+using std::string;
using std::vector;
using std::list;
using std::min;
using std::pair;
using std::max;
using boost::shared_ptr;
+using boost::is_any_of;
+using boost::split;
using dcp::Size;
FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log, bool fast)
cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
break;
case SUBTITLE_ASS:
- cout << "XXX: SUBTITLE_ASS " << rect->ass << "\n";
+ decode_ass_subtitle (rect->ass, period);
break;
}
}
list<ContentTimePeriod>
FFmpegDecoder::image_subtitles_during (ContentTimePeriod p, bool starting) const
{
- return _ffmpeg_content->subtitles_during (p, starting);
+ return _ffmpeg_content->image_subtitles_during (p, starting);
}
list<ContentTimePeriod>
-FFmpegDecoder::text_subtitles_during (ContentTimePeriod, bool) const
+FFmpegDecoder::text_subtitles_during (ContentTimePeriod p, bool starting) const
{
- return list<ContentTimePeriod> ();
+ return _ffmpeg_content->text_subtitles_during (p, starting);
}
void
image_subtitle (period, image, scaled_rect);
}
+
+/** Decode one ASS `Dialogue:' line as delivered by FFmpeg and emit it as
+ *  a text subtitle.
+ *  @param ass The raw ASS line (comma-separated Dialogue fields).
+ *  @param period Period of the subtitle.
+ */
+void
+FFmpegDecoder::decode_ass_subtitle (string ass, ContentTimePeriod period)
+{
+	/* We have no styles and no Format: line, so I'm assuming that FFmpeg
+	   produces a single format of Dialogue: lines...
+	*/
+
+	vector<string> bits;
+	split (bits, ass, is_any_of (","));
+	if (bits.size() < 10) {
+		return;
+	}
+
+	/* The subtitle text is the 10th field onwards; it may itself contain
+	   commas, so stitch any subsequent fields back together rather than
+	   taking just bits[9] (which would truncate the text at its first comma).
+	*/
+	string text = bits[9];
+	for (size_t i = 10; i < bits.size(); ++i) {
+		text += "," + bits[i];
+	}
+
+	sub::RawSubtitle base;
+	list<sub::Subtitle> subs = sub::collect<list<sub::Subtitle> > (sub::SSAReader::parse_line (base, text));
+
+	/* XXX: lots of this is copied from TextSubtitle; there should probably be some sharing */
+
+	/* Highest line index in this subtitle; used below to position lines from the bottom up */
+	int highest = 0;
+	BOOST_FOREACH (sub::Subtitle i, subs) {
+		BOOST_FOREACH (sub::Line j, i.lines) {
+			DCPOMATIC_ASSERT (j.vertical_position.reference && j.vertical_position.reference.get() == sub::TOP_OF_SUBTITLE);
+			DCPOMATIC_ASSERT (j.vertical_position.line);
+			highest = max (highest, j.vertical_position.line.get());
+		}
+	}
+
+	list<dcp::SubtitleString> ss;
+
+	/* Re-use the subtitles we collected above rather than parsing the line a second time */
+	BOOST_FOREACH (sub::Subtitle i, subs) {
+		BOOST_FOREACH (sub::Line j, i.lines) {
+			BOOST_FOREACH (sub::Block k, j.blocks) {
+				ss.push_back (
+					dcp::SubtitleString (
+						boost::optional<string> (),
+						k.italic,
+						dcp::Colour (255, 255, 255),
+						60,
+						1,
+						dcp::Time (i.from.seconds(), 1000),
+						dcp::Time (i.to.seconds(), 1000),
+						0,
+						dcp::HALIGN_CENTER,
+						/* This 1.015 is an arbitrary value to lift the bottom sub off the bottom
+						   of the screen a bit to a pleasing degree.
+						*/
+						1.015 - ((1 + highest - j.vertical_position.line.get()) * 1.5 / 22),
+						dcp::VALIGN_TOP,
+						k.text,
+						static_cast<dcp::Effect> (0),
+						dcp::Colour (255, 255, 255),
+						dcp::Time (),
+						dcp::Time ()
+					)
+				);
+			}
+		}
+	}
+
+	text_subtitle (period, ss);
+}
void decode_subtitle_packet ();
void decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTimePeriod period);
+ void decode_ass_subtitle (std::string ass, ContentTimePeriod period);
void maybe_add_subtitle ();
boost::shared_ptr<AudioBuffers> deinterleave_audio (boost::shared_ptr<FFmpegAudioStream> stream) const;
/* Finish off any hanging subtitles at the end */
for (LastSubtitleMap::const_iterator i = _last_subtitle_start.begin(); i != _last_subtitle_start.end(); ++i) {
if (i->second) {
- i->first->add_subtitle (
- i->second->id,
- ContentTimePeriod (
- i->second->time,
- ContentTime::from_frames (video_length(), video_frame_rate().get_value_or (24))
- )
- );
+ if (i->second->image) {
+ i->first->add_image_subtitle (
+ i->second->id,
+ ContentTimePeriod (
+ i->second->time,
+ ContentTime::from_frames (video_length(), video_frame_rate().get_value_or (24))
+ )
+ );
+ } else {
+ i->first->add_text_subtitle (
+ i->second->id,
+ ContentTimePeriod (
+ i->second->time,
+ ContentTime::from_frames (video_length(), video_frame_rate().get_value_or (24))
+ )
+ );
+ }
}
}
if (avcodec_decode_subtitle2 (context, &sub, &frame_finished, &_packet) >= 0 && frame_finished) {
string id = subtitle_id (sub);
FFmpegSubtitlePeriod const period = subtitle_period (sub);
+ bool const image = subtitle_is_image (sub);
+
LastSubtitleMap::iterator last = _last_subtitle_start.find (stream);
if (last != _last_subtitle_start.end() && last->second) {
/* We have seen the start of a subtitle but not yet the end. Whatever this is
finishes the previous subtitle, so add it */
- stream->add_subtitle (last->second->id, ContentTimePeriod (last->second->time, period.from));
+ if (image) {
+ stream->add_image_subtitle (last->second->id, ContentTimePeriod (last->second->time, period.from));
+ } else {
+ stream->add_text_subtitle (last->second->id, ContentTimePeriod (last->second->time, period.from));
+ }
if (sub.num_rects == 0) {
/* This is a `proper' end-of-subtitle */
_last_subtitle_start[stream] = optional<SubtitleStart> ();
} else {
/* This is just another subtitle, so we start again */
- _last_subtitle_start[stream] = SubtitleStart (id, period.from);
+ _last_subtitle_start[stream] = SubtitleStart (id, image, period.from);
}
} else if (sub.num_rects == 1) {
if (period.to) {
- stream->add_subtitle (id, ContentTimePeriod (period.from, period.to.get ()));
+ if (image) {
+ stream->add_image_subtitle (id, ContentTimePeriod (period.from, period.to.get ()));
+ } else {
+ stream->add_text_subtitle (id, ContentTimePeriod (period.from, period.to.get ()));
+ }
} else {
- _last_subtitle_start[stream] = SubtitleStart (id, period.from);
+ _last_subtitle_start[stream] = SubtitleStart (id, image, period.from);
}
}
avsubtitle_free (&sub);
+/** Record of a subtitle whose start we have seen but whose end we are still waiting for */
struct SubtitleStart
{
- SubtitleStart (std::string id_, ContentTime time_)
+ SubtitleStart (std::string id_, bool image_, ContentTime time_)
: id (id_)
+ , image (image_)
, time (time_)
{}
std::string id;
+ /** true if it's an image subtitle, false for text */
+ bool image;
ContentTime time;
};
{
if (version == 32) {
BOOST_FOREACH (cxml::NodePtr i, node->node_children ("Period")) {
- /* In version 32 we assumed that from times were unique, so they weer
- used as identifiers.
+ /* In version 32 we assumed that from times were unique, so they were
+ used as identifiers. All subtitles were image subtitles.
*/
- add_subtitle (
+ add_image_subtitle (
raw_convert<string> (i->string_child ("From")),
ContentTimePeriod (
ContentTime (i->number_child<ContentTime::Type> ("From")),
);
}
} else {
- /* In version 33 we use a hash of various parts of the subtitle as the id */
+ /* In version 33 we use a hash of various parts of the subtitle as the id.
+ <Subtitle> was initially used for image subtitles; later we have
+ <ImageSubtitle> and <TextSubtitle>
+ */
BOOST_FOREACH (cxml::NodePtr i, node->node_children ("Subtitle")) {
- add_subtitle (
+ add_image_subtitle (
+ raw_convert<string> (i->string_child ("Id")),
+ ContentTimePeriod (
+ ContentTime (i->number_child<ContentTime::Type> ("From")),
+ ContentTime (i->number_child<ContentTime::Type> ("To"))
+ )
+ );
+ }
+
+ BOOST_FOREACH (cxml::NodePtr i, node->node_children ("ImageSubtitle")) {
+ add_image_subtitle (
+ raw_convert<string> (i->string_child ("Id")),
+ ContentTimePeriod (
+ ContentTime (i->number_child<ContentTime::Type> ("From")),
+ ContentTime (i->number_child<ContentTime::Type> ("To"))
+ )
+ );
+ }
+
+ BOOST_FOREACH (cxml::NodePtr i, node->node_children ("TextSubtitle")) {
+ add_text_subtitle (
raw_convert<string> (i->string_child ("Id")),
ContentTimePeriod (
ContentTime (i->number_child<ContentTime::Type> ("From")),
{
FFmpegStream::as_xml (root);
- for (map<string, ContentTimePeriod>::const_iterator i = _subtitles.begin(); i != _subtitles.end(); ++i) {
- xmlpp::Node* node = root->add_child ("Subtitle");
+ as_xml (root, _image_subtitles, "ImageSubtitle");
+ as_xml (root, _text_subtitles, "TextSubtitle");
+}
+
+void
+FFmpegSubtitleStream::as_xml (xmlpp::Node* root, PeriodMap const & subs, string node_name) const
+{
+ for (PeriodMap::const_iterator i = subs.begin(); i != subs.end(); ++i) {
+ xmlpp::Node* node = root->add_child (node_name);
node->add_child("Id")->add_child_text (i->first);
node->add_child("From")->add_child_text (raw_convert<string> (i->second.from.get ()));
node->add_child("To")->add_child_text (raw_convert<string> (i->second.to.get ()));
}
+/** Record the period of an image subtitle; id must not already be present */
void
-FFmpegSubtitleStream::add_subtitle (string id, ContentTimePeriod period)
+FFmpegSubtitleStream::add_image_subtitle (string id, ContentTimePeriod period)
+{
+ DCPOMATIC_ASSERT (_image_subtitles.find (id) == _image_subtitles.end ());
+ _image_subtitles[id] = period;
+}
+
+/** Record the period of a text subtitle; id must not already be present */
+void
+FFmpegSubtitleStream::add_text_subtitle (string id, ContentTimePeriod period)
{
- DCPOMATIC_ASSERT (_subtitles.find (id) == _subtitles.end ());
- _subtitles[id] = period;
+ DCPOMATIC_ASSERT (_text_subtitles.find (id) == _text_subtitles.end ());
+ _text_subtitles[id] = period;
}
+/** Find the image subtitles in a period; forwards to the private helper with the image map */
list<ContentTimePeriod>
-FFmpegSubtitleStream::subtitles_during (ContentTimePeriod period, bool starting) const
+FFmpegSubtitleStream::image_subtitles_during (ContentTimePeriod period, bool starting) const
+{
+ return subtitles_during (period, starting, _image_subtitles);
+}
+
+/** Find the text subtitles in a period; forwards to the private helper with the text map */
+list<ContentTimePeriod>
+FFmpegSubtitleStream::text_subtitles_during (ContentTimePeriod period, bool starting) const
+{
+ return subtitles_during (period, starting, _text_subtitles);
+}
+
+list<ContentTimePeriod>
+FFmpegSubtitleStream::subtitles_during (ContentTimePeriod period, bool starting, PeriodMap const & subs) const
{
list<ContentTimePeriod> d;
/* XXX: inefficient */
- for (map<string, ContentTimePeriod>::const_iterator i = _subtitles.begin(); i != _subtitles.end(); ++i) {
+ for (map<string, ContentTimePeriod>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
if ((starting && period.contains (i->second.from)) || (!starting && period.overlaps (i->second))) {
d.push_back (i->second);
}
+/** @param id Subtitle id, which must exist in either the image or the text map.
+ *  @return End time of that subtitle; the image map is checked first, then the text map.
+ */
ContentTime
FFmpegSubtitleStream::find_subtitle_to (string id) const
{
- map<string, ContentTimePeriod>::const_iterator i = _subtitles.find (id);
- DCPOMATIC_ASSERT (i != _subtitles.end ());
+ PeriodMap::const_iterator i = _image_subtitles.find (id);
+ if (i != _image_subtitles.end ()) {
+ return i->second.to;
+ }
+
+ i = _text_subtitles.find (id);
+ DCPOMATIC_ASSERT (i != _text_subtitles.end ());
return i->second.to;
}
void
FFmpegSubtitleStream::add_offset (ContentTime offset)
{
- for (map<string, ContentTimePeriod>::iterator i = _subtitles.begin(); i != _subtitles.end(); ++i) {
+ for (PeriodMap::iterator i = _image_subtitles.begin(); i != _image_subtitles.end(); ++i) {
+ i->second.from += offset;
+ i->second.to += offset;
+ }
+
+ for (PeriodMap::iterator i = _text_subtitles.begin(); i != _text_subtitles.end(); ++i) {
i->second.from += offset;
i->second.to += offset;
}
void as_xml (xmlpp::Node *) const;
- void add_subtitle (std::string id, ContentTimePeriod period);
- std::list<ContentTimePeriod> subtitles_during (ContentTimePeriod period, bool starting) const;
+ void add_image_subtitle (std::string id, ContentTimePeriod period);
+ void add_text_subtitle (std::string id, ContentTimePeriod period);
+ std::list<ContentTimePeriod> image_subtitles_during (ContentTimePeriod period, bool starting) const;
+ std::list<ContentTimePeriod> text_subtitles_during (ContentTimePeriod period, bool starting) const;
ContentTime find_subtitle_to (std::string id) const;
void add_offset (ContentTime offset);
+ bool has_image_subtitles () const {
+ return !_image_subtitles.empty ();
+ }
+ bool has_text_subtitles () const {
+ return !_text_subtitles.empty ();
+ }
+
private:
+
+ /** Map of subtitle id to the period that the subtitle covers */
+ typedef std::map<std::string, ContentTimePeriod> PeriodMap;
+
+ void as_xml (xmlpp::Node *, PeriodMap const & subs, std::string node) const;
+ std::list<ContentTimePeriod> subtitles_during (ContentTimePeriod period, bool starting, PeriodMap const & subs) const;
+
+ PeriodMap _image_subtitles;
+ PeriodMap _text_subtitles;
};
if (!ps.text.empty ()) {
list<PositionImage> s = render_subtitles (ps.text, ps.fonts, _video_container_size);
copy (s.begin (), s.end (), back_inserter (sub_images));
}
optional<PositionImage> subtitles;
break;
}
- return PositionImage (image, Position<int> (x, y));
+ return PositionImage (image, Position<int> (max (0, x), max (0, y)));
}
list<PositionImage>