hold subs and closed captions.
for (Map::const_iterator i = _data.begin(); i != _data.end(); ++i) {
- shared_ptr<CaptionContent> caption = i->first.lock ();
+ shared_ptr<const CaptionContent> caption = i->first.lock ();
if (!caption) {
continue;
}
* @param from From time for these subtitles.
*/
void
-ActiveCaptions::add_from (weak_ptr<CaptionContent> content, PlayerCaption ps, DCPTime from)
+ActiveCaptions::add_from (weak_ptr<const CaptionContent> content, PlayerCaption ps, DCPTime from)
{
if (_data.find(content) == _data.end()) {
_data[content] = list<Period>();
* @return Return the corresponding subtitles and their from time.
*/
pair<PlayerCaption, DCPTime>
-ActiveCaptions::add_to (weak_ptr<CaptionContent> content, DCPTime to)
+ActiveCaptions::add_to (weak_ptr<const CaptionContent> content, DCPTime to)
{
DCPOMATIC_ASSERT (_data.find(content) != _data.end());
* @return true if we have any active subtitles from this content.
*/
bool
-ActiveCaptions::have (weak_ptr<CaptionContent> content) const
+ActiveCaptions::have (weak_ptr<const CaptionContent> content) const
{
Map::const_iterator i = _data.find(content);
if (i == _data.end()) {
std::list<PlayerCaption> get_burnt (DCPTimePeriod period, bool always_burn_captions) const;
void clear_before (DCPTime time);
void clear ();
- void add_from (boost::weak_ptr<CaptionContent> content, PlayerCaption ps, DCPTime from);
- std::pair<PlayerCaption, DCPTime> add_to (boost::weak_ptr<CaptionContent> content, DCPTime to);
- bool have (boost::weak_ptr<CaptionContent> content) const;
+ void add_from (boost::weak_ptr<const CaptionContent> content, PlayerCaption ps, DCPTime from);
+ std::pair<PlayerCaption, DCPTime> add_to (boost::weak_ptr<const CaptionContent> content, DCPTime to);
+ bool have (boost::weak_ptr<const CaptionContent> content) const;
private:
class Period
boost::optional<DCPTime> to;
};
- typedef std::map<boost::weak_ptr<CaptionContent>, std::list<Period> > Map;
+ typedef std::map<boost::weak_ptr<const CaptionContent>, std::list<Period> > Map;
Map _data;
};
{
shared_ptr<Player> player (new Player (_film, _playlist));
player->set_ignore_video ();
- player->set_ignore_subtitle ();
+ player->set_ignore_caption ();
player->set_fast ();
player->set_play_referenced ();
player->Audio.connect (bind (&AnalyseAudioJob::analyse, this, _1, _2));
, _line_spacing (1)
, _outline_width (2)
, _type (CAPTION_OPEN)
+ , _original_type (CAPTION_OPEN)
{
}
-shared_ptr<CaptionContent>
+/** @return CaptionContents from node or <Caption> nodes under node (according to version).
+ * The list could be empty if no CaptionContents are found.
+ */
+list<shared_ptr<CaptionContent> >
CaptionContent::from_xml (Content* parent, cxml::ConstNodePtr node, int version)
{
if (version < 34) {
subtitle streams, so check for that.
*/
if (node->string_child("Type") == "FFmpeg" && node->node_children("SubtitleStream").empty()) {
- return shared_ptr<CaptionContent> ();
+ return list<shared_ptr<CaptionContent> >();
}
/* Otherwise we can drop through to the newer logic */
if (version < 37) {
if (!node->optional_number_child<double>("SubtitleXOffset") && !node->optional_number_child<double>("SubtitleOffset")) {
- return shared_ptr<CaptionContent> ();
+ return list<shared_ptr<CaptionContent> >();
}
- return shared_ptr<CaptionContent> (new CaptionContent (parent, node, version));
+ list<shared_ptr<CaptionContent> > c;
+ c.push_back (shared_ptr<CaptionContent> (new CaptionContent (parent, node, version)));
+ return c;
}
if (!node->node_child("Caption")) {
- return shared_ptr<CaptionContent> ();
+ return list<shared_ptr<CaptionContent> >();
}
- return shared_ptr<CaptionContent> (new CaptionContent (parent, node->node_child("Caption"), version));
+ list<shared_ptr<CaptionContent> > c;
+ BOOST_FOREACH (cxml::ConstNodePtr i, node->node_children("Caption")) {
+ c.push_back (shared_ptr<CaptionContent> (new CaptionContent (parent, i, version)));
+ }
+ return c;
}
CaptionContent::CaptionContent (Content* parent, cxml::ConstNodePtr node, int version)
connect_to_fonts ();
_type = string_to_caption_type (node->optional_string_child("Type").get_value_or("open"));
+ _original_type = string_to_caption_type (node->optional_string_child("Type").get_value_or("open"));
}
}
caption->add_child("Type")->add_child_text (caption_type_to_string(_type));
+ caption->add_child("OriginalType")->add_child_text (caption_type_to_string(_original_type));
}
string
return _type;
}
- static boost::shared_ptr<CaptionContent> from_xml (Content* parent, cxml::ConstNodePtr, int version);
+ CaptionType original_type () const {
+ boost::mutex::scoped_lock lm (_mutex);
+ return _original_type;
+ }
+
+ static std::list<boost::shared_ptr<CaptionContent> > from_xml (Content* parent, cxml::ConstNodePtr, int version);
protected:
/** subtitle language (e.g. "German") or empty if it is not known */
boost::optional<ContentTime> _fade_in;
boost::optional<ContentTime> _fade_out;
int _outline_width;
+ /** what these captions will be used for in the output DCP (not necessarily what
+ * they were originally).
+ */
CaptionType _type;
+ /** the original type of these captions in their content */
+ CaptionType _original_type;
};
#endif
if (audio && c->audio) {
audio->take_settings_from (c->audio);
}
- if (caption && c->caption) {
- caption->take_settings_from (c->caption);
+
+ list<shared_ptr<CaptionContent> >::iterator i = caption.begin ();
+ list<shared_ptr<CaptionContent> >::const_iterator j = c->caption.begin ();
+ while (i != caption.end() && j != c->caption.end()) {
+ (*i)->take_settings_from (*j);
+ ++i;
+ ++j;
+ }
+}
+
+/** @return our single CaptionContent, or a null pointer if there is none.
+ *  It is an error to call this when there is more than one caption
+ *  (enforced by the assert below).
+ */
+shared_ptr<CaptionContent>
+Content::only_caption () const
+{
+ DCPOMATIC_ASSERT (caption.size() < 2);
+ if (caption.empty ()) {
+ return shared_ptr<CaptionContent> ();
+ }
+ return caption.front ();
+}
+
+/** @param type Original caption type to search for.
+ *  @return the first of our captions whose original_type() matches, or a
+ *  null pointer if there is none.
+ */
+shared_ptr<CaptionContent>
+Content::caption_of_original_type (CaptionType type) const
+{
+ BOOST_FOREACH (shared_ptr<CaptionContent> i, caption) {
+ if (i->original_type() == type) {
+ return i;
+ }
}
+
+ return shared_ptr<CaptionContent> ();
}
boost::shared_ptr<VideoContent> video;
boost::shared_ptr<AudioContent> audio;
- boost::shared_ptr<CaptionContent> caption;
+ std::list<boost::shared_ptr<CaptionContent> > caption;
+
+ boost::shared_ptr<CaptionContent> only_caption () const;
+ boost::shared_ptr<CaptionContent> caption_of_original_type (CaptionType type) const;
void signal_changed (int);
int const DCPContentProperty::NEEDS_KDM = 601;
int const DCPContentProperty::REFERENCE_VIDEO = 602;
int const DCPContentProperty::REFERENCE_AUDIO = 603;
-int const DCPContentProperty::REFERENCE_SUBTITLE = 604;
+int const DCPContentProperty::REFERENCE_CAPTION = 604;
int const DCPContentProperty::NAME = 605;
-int const DCPContentProperty::HAS_SUBTITLES = 606;
+int const DCPContentProperty::CAPTIONS = 606;
DCPContent::DCPContent (shared_ptr<const Film> film, boost::filesystem::path p)
: Content (film)
, _kdm_valid (false)
, _reference_video (false)
, _reference_audio (false)
- , _reference_subtitle (false)
, _three_d (false)
{
read_directory (p);
set_default_colour_conversion ();
+
+ for (int i = 0; i < CAPTION_COUNT; ++i) {
+ _reference_caption[i] = false;
+ }
}
DCPContent::DCPContent (shared_ptr<const Film> film, cxml::ConstNodePtr node, int version)
audio = AudioContent::from_xml (this, node, version);
caption = CaptionContent::from_xml (this, node, version);
+ for (int i = 0; i < CAPTION_COUNT; ++i) {
+ _reference_caption[i] = false;
+ }
+
if (video && audio) {
audio->set_stream (
AudioStreamPtr (
_kdm_valid = node->bool_child ("KDMValid");
_reference_video = node->optional_bool_child ("ReferenceVideo").get_value_or (false);
_reference_audio = node->optional_bool_child ("ReferenceAudio").get_value_or (false);
- _reference_subtitle = node->optional_bool_child ("ReferenceSubtitle").get_value_or (false);
+ if (version >= 37) {
+ _reference_caption[CAPTION_OPEN] = node->optional_bool_child("ReferenceOpenCaption").get_value_or(false);
+ _reference_caption[CAPTION_CLOSED] = node->optional_bool_child("ReferenceClosedCaption").get_value_or(false);
+ } else {
+ _reference_caption[CAPTION_OPEN] = node->optional_bool_child("ReferenceSubtitle").get_value_or(false);
+ _reference_caption[CAPTION_CLOSED] = false;
+ }
if (node->optional_string_child("Standard")) {
string const s = node->optional_string_child("Standard").get();
if (s == "Interop") {
bool const needed_assets = needs_assets ();
bool const needed_kdm = needs_kdm ();
string const old_name = name ();
- bool had_subtitles = static_cast<bool> (caption);
+ int const old_captions = caption.size ();
if (job) {
job->set_progress_unknown ();
signal_changed (AudioContentProperty::STREAMS);
}
- bool has_subtitles = false;
+ int captions = 0;
{
boost::mutex::scoped_lock lm (_mutex);
_name = examiner->name ();
- if (examiner->has_subtitles ()) {
- caption.reset (new CaptionContent (this));
- } else {
- caption.reset ();
+ for (int i = 0; i < examiner->captions(); ++i) {
+ caption.push_back (shared_ptr<CaptionContent> (new CaptionContent (this)));
}
- has_subtitles = static_cast<bool> (caption);
+ captions = caption.size ();
_encrypted = examiner->encrypted ();
_needs_assets = examiner->needs_assets ();
_kdm_valid = examiner->kdm_valid ();
_reel_lengths = examiner->reel_lengths ();
}
- if (had_subtitles != has_subtitles) {
- signal_changed (DCPContentProperty::HAS_SUBTITLES);
+ if (old_captions != captions) {
+ signal_changed (DCPContentProperty::CAPTIONS);
}
if (needed_assets != needs_assets ()) {
audio->stream()->mapping().as_xml (node->add_child("AudioMapping"));
}
- if (caption) {
- caption->as_xml (node);
+ BOOST_FOREACH (shared_ptr<CaptionContent> i, caption) {
+ i->as_xml (node);
}
boost::mutex::scoped_lock lm (_mutex);
node->add_child("KDMValid")->add_child_text (_kdm_valid ? "1" : "0");
node->add_child("ReferenceVideo")->add_child_text (_reference_video ? "1" : "0");
node->add_child("ReferenceAudio")->add_child_text (_reference_audio ? "1" : "0");
- node->add_child("ReferenceSubtitle")->add_child_text (_reference_subtitle ? "1" : "0");
+ node->add_child("ReferenceOpenCaption")->add_child_text(_reference_caption[CAPTION_OPEN] ? "1" : "0");
+ node->add_child("ReferenceClosedCaption")->add_child_text(_reference_caption[CAPTION_CLOSED] ? "1" : "0");
if (_standard) {
switch (_standard.get ()) {
case dcp::INTEROP:
s += video->identifier() + "_";
}
- if (caption) {
- s += caption->identifier () + " ";
+ BOOST_FOREACH (shared_ptr<CaptionContent> i, caption) {
+ s += i->identifier () + " ";
}
- s += string (_reference_video ? "1" : "0") + string (_reference_subtitle ? "1" : "0");
+ s += string (_reference_video ? "1" : "0");
+ for (int i = 0; i < CAPTION_COUNT; ++i) {
+ s += string (_reference_caption[i] ? "1" : "0");
+ }
return s;
}
}
void
-DCPContent::set_reference_subtitle (bool r)
+DCPContent::set_reference_caption (CaptionType type, bool r)
{
{
boost::mutex::scoped_lock lm (_mutex);
- _reference_subtitle = r;
+ _reference_caption[type] = r;
}
- signal_changed (DCPContentProperty::REFERENCE_SUBTITLE);
+ signal_changed (DCPContentProperty::REFERENCE_CAPTION);
}
list<DCPTimePeriod>
}
bool
-DCPContent::can_reference (function<shared_ptr<ContentPart> (shared_ptr<const Content>)> part, string overlapping, string& why_not) const
+DCPContent::can_reference (function<bool (shared_ptr<const Content>)> part, string overlapping, string& why_not) const
{
/* We must be using the same standard as the film */
if (_standard) {
return true;
}
+/** Predicate for DCPContent::can_reference: @return true if the content has video */
+static
+bool check_video (shared_ptr<const Content> c)
+{
+ return static_cast<bool>(c->video);
+}
+
bool
DCPContent::can_reference_video (string& why_not) const
{
}
/// TRANSLATORS: this string will follow "Cannot reference this DCP: "
- return can_reference (bind (&Content::video, _1), _("it overlaps other video content; remove the other content."), why_not);
+ return can_reference (bind (&check_video, _1), _("it overlaps other video content; remove the other content."), why_not);
+}
+
+/** Predicate for DCPContent::can_reference: @return true if the content has audio */
+static
+bool check_audio (shared_ptr<const Content> c)
+{
+ return static_cast<bool>(c->audio);
}
bool
}
/// TRANSLATORS: this string will follow "Cannot reference this DCP: "
- return can_reference (bind (&Content::audio, _1), _("it overlaps other audio content; remove the other content."), why_not);
+ return can_reference (bind (&check_audio, _1), _("it overlaps other audio content; remove the other content."), why_not);
}
+/** Predicate for DCPContent::can_reference: @return true if the content has any captions */
+static
+bool check_caption (shared_ptr<const Content> c)
+{
+ return !c->caption.empty();
+}
bool
-DCPContent::can_reference_subtitle (string& why_not) const
+DCPContent::can_reference_caption (CaptionType type, string& why_not) const
{
shared_ptr<DCPDecoder> decoder;
try {
}
BOOST_FOREACH (shared_ptr<dcp::Reel> i, decoder->reels()) {
- if (!i->main_subtitle()) {
+ if (type == CAPTION_OPEN && !i->main_subtitle()) {
/// TRANSLATORS: this string will follow "Cannot reference this DCP: "
why_not = _("it does not have subtitles in all its reels.");
return false;
}
+ if (type == CAPTION_CLOSED && !i->closed_caption()) {
+ /// TRANSLATORS: this string will follow "Cannot reference this DCP: "
+ why_not = _("it does not have closed captions in all its reels.");
+ return false;
+ }
}
/// TRANSLATORS: this string will follow "Cannot reference this DCP: "
- return can_reference (bind (&Content::caption, _1), _("it overlaps other caption content; remove the other content."), why_not);
+ return can_reference (bind (&check_caption, _1), _("it overlaps other caption content; remove the other content."), why_not);
}
void
_reference_video = dc->_reference_video;
_reference_audio = dc->_reference_audio;
- _reference_subtitle = dc->_reference_subtitle;
+ for (int i = 0; i < CAPTION_COUNT; ++i) {
+ _reference_caption[i] = dc->_reference_caption[i];
+ }
}
void
static int const NEEDS_ASSETS;
static int const REFERENCE_VIDEO;
static int const REFERENCE_AUDIO;
- static int const REFERENCE_SUBTITLE;
+ static int const REFERENCE_CAPTION;
static int const NAME;
- static int const HAS_SUBTITLES;
+ static int const CAPTIONS;
};
class ContentPart;
bool can_reference_audio (std::string &) const;
- void set_reference_subtitle (bool r);
+ void set_reference_caption (CaptionType type, bool r);
- bool reference_subtitle () const {
+ /** @param type Original type of captions in the DCP.
+ * @return true if these captions are to be referenced.
+ */
+ bool reference_caption (CaptionType type) const {
boost::mutex::scoped_lock lm (_mutex);
- return _reference_subtitle;
+ return _reference_caption[type];
}
- bool can_reference_subtitle (std::string &) const;
+ bool can_reference_caption (CaptionType type, std::string &) const;
void set_cpl (std::string id);
void read_directory (boost::filesystem::path);
std::list<DCPTimePeriod> reels () const;
bool can_reference (
- boost::function <boost::shared_ptr<ContentPart> (boost::shared_ptr<const Content>)>,
+ boost::function <bool (boost::shared_ptr<const Content>)>,
std::string overlapping,
std::string& why_not
) const;
* rather than by rewrapping.
*/
bool _reference_audio;
- /** true if the subtitle in this DCP should be included in the output by reference
- * rather than by rewrapping.
+ /** true if the captions in this DCP should be included in the output by reference
+ * rather than by rewrapping. The types here are the original caption types,
+ * not what they are being used for.
*/
- bool _reference_subtitle;
+ bool _reference_caption[CAPTION_COUNT];
boost::optional<dcp::Standard> _standard;
bool _three_d;
#include <dcp/reel_picture_asset.h>
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
+#include <dcp/reel_closed_caption_asset.h>
#include <dcp/mono_picture_frame.h>
#include <dcp/stereo_picture_frame.h>
#include <dcp/sound_frame.h>
if (c->audio) {
audio.reset (new AudioDecoder (this, c->audio, log, fast));
}
- if (c->caption) {
+ BOOST_FOREACH (shared_ptr<CaptionContent> i, c->caption) {
/* XXX: this time here should be the time of the first subtitle, not 0 */
- caption.reset (new CaptionDecoder (this, c->caption, log, ContentTime()));
+ caption.push_back (shared_ptr<CaptionDecoder> (new CaptionDecoder (this, i, log, ContentTime())));
}
list<shared_ptr<dcp::CPL> > cpl_list = cpls ();
/* Frame within the (played part of the) reel that is coming up next */
int64_t const frame = _next.frames_round (vfr);
- /* We must emit subtitles first as when we emit the video for this frame
- it will expect already to have the subs.
+ /* We must emit captions first as when we emit the video for this frame
+ it will expect already to have the captions.
*/
- pass_subtitles (_next);
+ pass_captions (_next);
if ((_mono_reader || _stereo_reader) && (_decode_referenced || !_dcp_content->reference_video())) {
shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
}
+/** Pass any captions in the current reel to our caption decoders: the
+ *  open (main subtitle) asset first, then the closed caption asset.
+ */
void
-DCPDecoder::pass_subtitles (ContentTime next)
+DCPDecoder::pass_captions (ContentTime next)
+{
+ /* NOTE(review): this assumes `caption' holds one decoder per caption asset
+  * in the reel, ordered open-then-closed; *decoder is dereferenced without
+  * being checked against caption.end() — confirm the constructor guarantees
+  * this alignment.
+  */
+ list<shared_ptr<CaptionDecoder> >::const_iterator decoder = caption.begin ();
+ if ((*_reel)->main_subtitle()) {
+ pass_captions (
+ next, (*_reel)->main_subtitle()->asset(), _dcp_content->reference_caption(CAPTION_OPEN), (*_reel)->main_subtitle()->entry_point(), *decoder
+ );
+ ++decoder;
+ }
+ if ((*_reel)->closed_caption()) {
+ pass_captions (
+ next, (*_reel)->closed_caption()->asset(), _dcp_content->reference_caption(CAPTION_CLOSED), (*_reel)->closed_caption()->entry_point(), *decoder
+ );
+ ++decoder;
+ }
+}
+
+void
+DCPDecoder::pass_captions (ContentTime next, shared_ptr<dcp::SubtitleAsset> asset, bool reference, int64_t entry_point, shared_ptr<CaptionDecoder> decoder)
{
double const vfr = _dcp_content->active_video_frame_rate ();
/* Frame within the (played part of the) reel that is coming up next */
int64_t const frame = next.frames_round (vfr);
- if ((*_reel)->main_subtitle() && (_decode_referenced || !_dcp_content->reference_subtitle())) {
- int64_t const entry_point = (*_reel)->main_subtitle()->entry_point ();
- list<shared_ptr<dcp::Subtitle> > subs = (*_reel)->main_subtitle()->asset()->subtitles_during (
+ if (_decode_referenced || !reference) {
+ list<shared_ptr<dcp::Subtitle> > subs = asset->subtitles_during (
dcp::Time (entry_point + frame, vfr, vfr),
dcp::Time (entry_point + frame + 1, vfr, vfr),
true
if (is) {
list<dcp::SubtitleString> s;
s.push_back (*is);
- caption->emit_plain (
+ decoder->emit_plain (
ContentTimePeriod (
ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->in().as_seconds ()),
ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->out().as_seconds ())
next_reel ();
}
- /* Pass subtitles in the pre-roll */
+ /* Pass captions in the pre-roll */
double const vfr = _dcp_content->active_video_frame_rate ();
for (int i = 0; i < pre_roll_seconds * vfr; ++i) {
- pass_subtitles (pre);
+ pass_captions (pre);
pre += ContentTime::from_frames (1, vfr);
}
#include <dcp/mono_picture_asset_reader.h>
#include <dcp/stereo_picture_asset_reader.h>
#include <dcp/sound_asset_reader.h>
+#include <dcp/subtitle_asset.h>
namespace dcp {
class Reel;
void next_reel ();
void get_readers ();
- void pass_subtitles (ContentTime next);
+ void pass_captions (ContentTime next);
+ void pass_captions (ContentTime next, boost::shared_ptr<dcp::SubtitleAsset> asset, bool reference, int64_t entry_point, boost::shared_ptr<CaptionDecoder> decoder);
/** Time of next thing to return from pass relative to the start of _reel */
ContentTime _next;
_player_caption_connection = _player->Caption.connect (bind (&DCPEncoder::caption, this, _1, _2, _3));
BOOST_FOREACH (shared_ptr<const Content> c, film->content ()) {
- if (c->caption && c->caption->use() && !c->caption->burn()) {
- _non_burnt_subtitles = true;
+ BOOST_FOREACH (shared_ptr<CaptionContent> i, c->caption) {
+ if (i->use() && !i->burn()) {
+ _non_burnt_subtitles = true;
+ }
}
}
}
#include <dcp/sound_asset_reader.h>
#include <dcp/subtitle_asset.h>
#include <dcp/reel_subtitle_asset.h>
+#include <dcp/reel_closed_caption_asset.h>
#include <dcp/sound_asset.h>
#include <boost/foreach.hpp>
#include <iostream>
, _audio_length (0)
, _has_video (false)
, _has_audio (false)
- , _has_subtitles (false)
+ , _captions (0)
, _encrypted (false)
, _needs_assets (false)
, _kdm_valid (false)
return;
}
- _has_subtitles = true;
+ ++_captions;
+ }
+
+ if (i->closed_caption ()) {
+ if (!i->closed_caption()->asset_ref().resolved()) {
+ /* We are missing this asset so we can't continue; examination will be repeated later */
+ _needs_assets = true;
+ return;
+ }
+
+ ++_captions;
}
if (i->main_picture()) {
_reel_lengths.push_back (i->main_sound()->duration());
} else if (i->main_subtitle()) {
_reel_lengths.push_back (i->main_subtitle()->duration());
+ } else if (i->closed_caption()) {
+ _reel_lengths.push_back (i->closed_caption()->duration());
}
}
return _name;
}
- bool has_subtitles () const {
- return _has_subtitles;
+ int captions () const {
+ return _captions;
}
bool encrypted () const {
bool _has_video;
/** true if this DCP has audio content (but false if it has unresolved references to audio content) */
bool _has_audio;
- bool _has_subtitles;
+ int _captions;
bool _encrypted;
bool _needs_assets;
bool _kdm_valid;
DCPSubtitleContent::DCPSubtitleContent (shared_ptr<const Film> film, boost::filesystem::path path)
: Content (film, path)
{
- caption.reset (new CaptionContent (this));
+ caption.push_back (shared_ptr<CaptionContent> (new CaptionContent (this)));
}
DCPSubtitleContent::DCPSubtitleContent (shared_ptr<const Film> film, cxml::ConstNodePtr node, int version)
boost::mutex::scoped_lock lm (_mutex);
/* Default to turning these subtitles on */
- caption->set_use (true);
+ only_caption()->set_use (true);
if (iop) {
- caption->set_language (iop->language ());
+ only_caption()->set_language (iop->language ());
} else if (smpte) {
- caption->set_language (smpte->language().get_value_or (""));
+ only_caption()->set_language (smpte->language().get_value_or (""));
}
_length = ContentTime::from_seconds (sc->latest_subtitle_out().as_seconds ());
BOOST_FOREACH (shared_ptr<dcp::LoadFontNode> i, sc->load_font_nodes ()) {
- caption->add_font (shared_ptr<Font> (new Font (i->id)));
+ only_caption()->add_font (shared_ptr<Font> (new Font (i->id)));
}
}
node->add_child("Type")->add_child_text ("DCPSubtitle");
Content::as_xml (node, with_paths);
- if (caption) {
- caption->as_xml (node);
+ if (only_caption()) {
+ only_caption()->as_xml (node);
}
node->add_child("Length")->add_child_text (raw_convert<string> (_length.get ()));
if (_next != _subtitles.end()) {
first = content_time_period(*_next).from;
}
- caption.reset (new CaptionDecoder (this, content->caption, log, first));
+ caption.push_back (shared_ptr<CaptionDecoder> (new CaptionDecoder (this, content->only_caption(), log, first)));
}
void
/* XXX: image subtitles */
}
- caption->emit_plain (p, s);
+ only_caption()->emit_plain (p, s);
return false;
}
using std::cout;
using boost::optional;
+using boost::shared_ptr;
/** @return Earliest time of content that the next pass() will emit */
ContentTime
pos = audio->position();
}
- if (caption && !caption->ignore() && (!pos || caption->position() < *pos)) {
- pos = caption->position();
+ BOOST_FOREACH (shared_ptr<CaptionDecoder> i, caption) {
+ if (!i->ignore() && (!pos || i->position() < *pos)) {
+ pos = i->position();
+ }
}
return pos.get_value_or(ContentTime());
if (audio) {
audio->seek ();
}
- if (caption) {
- caption->seek ();
+ BOOST_FOREACH (shared_ptr<CaptionDecoder> i, caption) {
+ i->seek ();
}
}
+
+/** @return our single CaptionDecoder, or a null pointer if there is none.
+ *  It is an error to call this when there is more than one caption decoder
+ *  (enforced by the assert below).
+ */
+shared_ptr<CaptionDecoder>
+Decoder::only_caption () const
+{
+ DCPOMATIC_ASSERT (caption.size() < 2);
+ if (caption.empty ()) {
+ return shared_ptr<CaptionDecoder> ();
+ }
+ return caption.front ();
+}
boost::shared_ptr<VideoDecoder> video;
boost::shared_ptr<AudioDecoder> audio;
- boost::shared_ptr<CaptionDecoder> caption;
+ std::list<boost::shared_ptr<CaptionDecoder> > caption;
+
+ boost::shared_ptr<CaptionDecoder> only_caption () const;
/** Do some decoding and perhaps emit video, audio or subtitle data.
* @return true if this decoder will emit no more data unless a seek() happens.
}
}
- if (caption) {
- caption->as_xml (node);
+ if (only_caption()) {
+ only_caption()->as_xml (node);
}
boost::mutex::scoped_lock lm (_mutex);
_subtitle_streams = examiner->subtitle_streams ();
if (!_subtitle_streams.empty ()) {
- caption.reset (new CaptionContent (this));
+ caption.clear ();
+ caption.push_back (shared_ptr<CaptionContent> (new CaptionContent (this)));
_subtitle_stream = _subtitle_streams.front ();
}
s += "_" + video->identifier();
}
- if (caption && caption->use() && caption->burn()) {
- s += "_" + caption->identifier();
+ if (only_caption() && only_caption()->use() && only_caption()->burn()) {
+ s += "_" + only_caption()->identifier();
}
boost::mutex::scoped_lock lm (_mutex);
audio.reset (new AudioDecoder (this, c->audio, log, fast));
}
- if (c->caption) {
+ if (c->only_caption()) {
/* XXX: this time here should be the time of the first subtitle, not 0 */
- caption.reset (new CaptionDecoder (this, c->caption, log, ContentTime()));
+ caption.push_back (shared_ptr<CaptionDecoder> (new CaptionDecoder (this, c->only_caption(), log, ContentTime())));
}
_next_time.resize (_format_context->nb_streams);
if (_video_stream && si == _video_stream.get() && !video->ignore()) {
decode_video_packet ();
- } else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !caption->ignore()) {
+ } else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_caption()->ignore()) {
decode_subtitle_packet ();
} else {
decode_audio_packet ();
/* Stop any current subtitle, either at the time it was supposed to stop, or now if now is sooner */
if (_have_current_subtitle) {
if (_current_subtitle_to) {
- caption->emit_stop (min(*_current_subtitle_to, subtitle_period(sub).from + _pts_offset));
+ only_caption()->emit_stop (min(*_current_subtitle_to, subtitle_period(sub).from + _pts_offset));
} else {
- caption->emit_stop (subtitle_period(sub).from + _pts_offset);
+ only_caption()->emit_stop (subtitle_period(sub).from + _pts_offset);
}
_have_current_subtitle = false;
}
}
if (_current_subtitle_to) {
- caption->emit_stop (*_current_subtitle_to);
+ only_caption()->emit_stop (*_current_subtitle_to);
}
avsubtitle_free (&sub);
static_cast<double> (rect->h) / target_height
);
- caption->emit_bitmap_start (from, image, scaled_rect);
+ only_caption()->emit_bitmap_start (from, image, scaled_rect);
}
void
);
BOOST_FOREACH (sub::Subtitle const & i, sub::collect<list<sub::Subtitle> > (raw)) {
- caption->emit_plain_start (from, i);
+ only_caption()->emit_plain_start (from, i);
}
}
d += "_" + dm.audio_language;
if (!dm.subtitle_language.empty()) {
- bool burnt_in = true;
- BOOST_FOREACH (shared_ptr<Content> i, content ()) {
- if (!i->caption) {
- continue;
- }
+ /* I'm not clear on the precise details of the convention for CCAP labelling;
+ for now I'm just appending -CCAP if we have any closed captions.
+ */
- if (i->caption->use() && !i->caption->burn()) {
- burnt_in = false;
+ bool burnt_in = true;
+ bool ccap = false;
+ BOOST_FOREACH (shared_ptr<Content> i, content()) {
+ BOOST_FOREACH (shared_ptr<CaptionContent> j, i->caption) {
+ if (j->type() == CAPTION_OPEN && j->use() && !j->burn()) {
+ burnt_in = false;
+ } else if (j->type() == CAPTION_CLOSED) {
+ ccap = true;
+ }
}
}
}
d += "-" + language;
+ if (ccap) {
+ d += "-CCAP";
+ }
} else {
d += "-XX";
}
bool vf = false;
BOOST_FOREACH (shared_ptr<Content> i, content ()) {
shared_ptr<const DCPContent> dc = dynamic_pointer_cast<const DCPContent> (i);
- if (dc && (dc->reference_video() || dc->reference_audio() || dc->reference_subtitle())) {
+ /* dc is null when this content is not a DCP (dynamic_pointer_cast fails),
+  * so we must check it before dereferencing.
+  */
+ if (!dc) {
+ continue;
+ }
+ /* Use `j' rather than `i' to avoid shadowing the content pointer above */
+ bool any_caption = false;
+ for (int j = 0; j < CAPTION_COUNT; ++j) {
+ if (dc->reference_caption(static_cast<CaptionType>(j))) {
+ any_caption = true;
+ }
+ }
+ if (dc->reference_video() || dc->reference_audio() || any_caption) {
vf = true;
}
}
{
/* Add {video,subtitle} content after any existing {video,subtitle} content */
if (c->video) {
- c->set_position (_playlist->video_end ());
- } else if (c->caption) {
- c->set_position (_playlist->subtitle_end ());
+ c->set_position (_playlist->video_end());
+ } else if (!c->caption.empty()) {
+ c->set_position (_playlist->caption_end());
}
if (_template_film) {
{
set<string> languages;
- ContentList cl = content ();
- BOOST_FOREACH (shared_ptr<Content>& c, cl) {
- if (c->caption) {
- languages.insert (c->caption->language ());
+ BOOST_FOREACH (shared_ptr<Content> i, content()) {
+ BOOST_FOREACH (shared_ptr<CaptionContent> j, i->caption) {
+ languages.insert (j->language ());
}
}
bool big_font_files = false;
if (film->interop ()) {
BOOST_FOREACH (shared_ptr<Content> i, content) {
- if (i->caption) {
- BOOST_FOREACH (shared_ptr<Font> j, i->caption->fonts ()) {
- for (int k = 0; k < FontFiles::VARIANTS; ++k) {
- optional<boost::filesystem::path> const p = j->file (static_cast<FontFiles::Variant> (k));
+ BOOST_FOREACH (shared_ptr<CaptionContent> j, i->caption) {
+ BOOST_FOREACH (shared_ptr<Font> k, j->fonts()) {
+ for (int l = 0; l < FontFiles::VARIANTS; ++l) {
+ optional<boost::filesystem::path> const p = k->file (static_cast<FontFiles::Variant>(l));
if (p && boost::filesystem::file_size (p.get()) >= (640 * 1024)) {
big_font_files = true;
}
using boost::shared_ptr;
using boost::function;
-ContentList overlaps (ContentList cl, function<shared_ptr<ContentPart> (shared_ptr<const Content>)> part, DCPTime from, DCPTime to)
+ContentList overlaps (ContentList cl, function<bool (shared_ptr<const Content>)> part, DCPTime from, DCPTime to)
{
ContentList overlaps;
DCPTimePeriod period (from, to);
* ContentList
*/
ContentList overlaps (
- ContentList cl, boost::function<boost::shared_ptr<ContentPart> (boost::shared_ptr<const Content>)> part, DCPTime from, DCPTime to
+ ContentList cl, boost::function<bool (boost::shared_ptr<const Content>)> part, DCPTime from, DCPTime to
);
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
#include <dcp/reel_picture_asset.h>
+#include <dcp/reel_closed_caption_asset.h>
#include <boost/foreach.hpp>
#include <stdint.h>
#include <algorithm>
, _playlist (playlist)
, _have_valid_pieces (false)
, _ignore_video (false)
- , _ignore_subtitle (false)
+ , _ignore_caption (false)
, _fast (false)
, _play_referenced (false)
, _audio_merger (_film->audio_frame_rate())
decoder->video->set_ignore (true);
}
- if (decoder->caption && _ignore_subtitle) {
- decoder->caption->set_ignore (true);
+ if (_ignore_caption) {
+ BOOST_FOREACH (shared_ptr<CaptionDecoder> i, decoder->caption) {
+ i->set_ignore (true);
+ }
}
shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
}
- if (decoder->caption) {
- decoder->caption->BitmapStart.connect (
- bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<CaptionContent>(piece->content->caption), _1)
+ list<shared_ptr<CaptionDecoder> >::const_iterator j = decoder->caption.begin();
+
+ while (j != decoder->caption.end()) {
+ (*j)->BitmapStart.connect (
+ bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const CaptionContent>((*j)->content()), _1)
);
- decoder->caption->PlainStart.connect (
- bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<CaptionContent>(piece->content->caption), _1)
+ (*j)->PlainStart.connect (
+ bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const CaptionContent>((*j)->content()), _1)
);
- decoder->caption->Stop.connect (
- bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<CaptionContent>(piece->content->caption), _1, _2)
+ (*j)->Stop.connect (
+ bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const CaptionContent>((*j)->content()), _1, _2)
);
+
+ ++j;
}
}
}
list<shared_ptr<Font> > fonts;
- BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
- if (p->content->caption) {
+ BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+ BOOST_FOREACH (shared_ptr<CaptionContent> j, i->content->caption) {
/* XXX: things may go wrong if there are duplicate font IDs
with different font files.
*/
- list<shared_ptr<Font> > f = p->content->caption->fonts ();
+ list<shared_ptr<Font> > f = j->fonts ();
copy (f.begin(), f.end(), back_inserter (fonts));
}
}
}
void
-Player::set_ignore_subtitle ()
+Player::set_ignore_caption ()
{
- _ignore_subtitle = true;
+ _ignore_caption = true;
}
/** Set a type of caption that this player should always burn into the image,
);
}
- if (j->reference_subtitle ()) {
+ if (j->reference_caption (CAPTION_OPEN)) {
shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
DCPOMATIC_ASSERT (ra);
ra->set_entry_point (ra->entry_point() + trim_start);
);
}
+ if (j->reference_caption (CAPTION_CLOSED)) {
+ shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
+ DCPOMATIC_ASSERT (ra);
+ ra->set_entry_point (ra->entry_point() + trim_start);
+ ra->set_duration (ra->duration() - trim_start - trim_end);
+ a.push_back (
+ ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
+ );
+ }
+
/* Assume that main picture duration is the length of the reel */
offset += k->main_picture()->duration ();
}
i->done = true;
} else {
- /* Given two choices at the same time, pick the one with a subtitle so we see it before
+ /* Given two choices at the same time, pick the one with captions so we see it before
the video.
*/
- if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->caption)) {
+ if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->caption.empty())) {
earliest_time = t;
earliest_content = i;
}
}
void
-Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<CaptionContent> wc, ContentBitmapCaption subtitle)
+Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const CaptionContent> wc, ContentBitmapCaption subtitle)
{
shared_ptr<Piece> piece = wp.lock ();
- shared_ptr<CaptionContent> caption = wc.lock ();
+ shared_ptr<const CaptionContent> caption = wc.lock ();
if (!piece || !caption) {
return;
}
}
void
-Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<CaptionContent> wc, ContentTextCaption subtitle)
+Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const CaptionContent> wc, ContentTextCaption subtitle)
{
shared_ptr<Piece> piece = wp.lock ();
- shared_ptr<CaptionContent> caption = wc.lock ();
+ shared_ptr<const CaptionContent> caption = wc.lock ();
if (!piece || !caption) {
return;
}
}
void
-Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<CaptionContent> wc, ContentTime to, CaptionType type)
+Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const CaptionContent> wc, ContentTime to, CaptionType type)
{
if (!_active_captions[type].have (wc)) {
return;
}
shared_ptr<Piece> piece = wp.lock ();
- shared_ptr<CaptionContent> caption = wc.lock ();
+ shared_ptr<const CaptionContent> caption = wc.lock ();
if (!piece || !caption) {
return;
}
void set_video_container_size (dcp::Size);
void set_ignore_video ();
- void set_ignore_subtitle ();
+ void set_ignore_caption ();
void set_always_burn_captions (CaptionType type);
void set_fast ();
void set_play_referenced ();
boost::shared_ptr<PlayerVideo> black_player_video_frame (Eyes eyes) const;
void video (boost::weak_ptr<Piece>, ContentVideo);
void audio (boost::weak_ptr<Piece>, AudioStreamPtr, ContentAudio);
- void bitmap_text_start (boost::weak_ptr<Piece>, boost::weak_ptr<CaptionContent>, ContentBitmapCaption);
- void plain_text_start (boost::weak_ptr<Piece>, boost::weak_ptr<CaptionContent>, ContentTextCaption);
- void subtitle_stop (boost::weak_ptr<Piece>, boost::weak_ptr<CaptionContent>, ContentTime, CaptionType);
+ void bitmap_text_start (boost::weak_ptr<Piece>, boost::weak_ptr<const CaptionContent>, ContentBitmapCaption);
+ void plain_text_start (boost::weak_ptr<Piece>, boost::weak_ptr<const CaptionContent>, ContentTextCaption);
+ void subtitle_stop (boost::weak_ptr<Piece>, boost::weak_ptr<const CaptionContent>, ContentTime, CaptionType);
DCPTime one_video_frame () const;
void fill_audio (DCPTimePeriod period);
std::pair<boost::shared_ptr<AudioBuffers>, DCPTime> discard_audio (
/** true if the player should ignore all video; i.e. never produce any */
bool _ignore_video;
- /** true if the player should ignore all audio; i.e. never produce any */
- bool _ignore_subtitle;
+ /** true if the player should ignore all captions; i.e. never produce any */
+ bool _ignore_caption;
/** Type of captions that the player should always burn into the video regardless
of content settings.
*/
placed.push_back (i);
}
- /* Subtitles */
+ /* Captions */
DCPTime next;
BOOST_FOREACH (shared_ptr<Content> i, _content) {
- if (!i->caption || find (placed.begin(), placed.end(), i) != placed.end()) {
+ if (i->caption.empty() || find (placed.begin(), placed.end(), i) != placed.end()) {
continue;
}
string t;
BOOST_FOREACH (shared_ptr<const Content> i, _content) {
- if (i->video || (i->caption && i->caption->burn())) {
+ bool burn = false;
+ BOOST_FOREACH (shared_ptr<CaptionContent> j, i->caption) {
+ if (j->burn()) {
+ burn = true;
+ }
+ }
+ if (i->video || burn) {
t += i->identifier ();
}
}
}
DCPTime
-Playlist::subtitle_end () const
+Playlist::caption_end () const
{
DCPTime end;
BOOST_FOREACH (shared_ptr<Content> i, _content) {
- if (i->caption) {
+ if (!i->caption.empty ()) {
end = max (end, i->end ());
}
}
int best_video_frame_rate () const;
DCPTime video_end () const;
- DCPTime subtitle_end () const;
+ DCPTime caption_end () const;
FrameRateChange active_frame_rate_change (DCPTime, int dcp_frame_rate) const;
std::string content_summary (DCPTimePeriod period) const;
std::pair<double, double> speed_up_range (int dcp_video_frame_rate) const;
TextCaptionFileContent::TextCaptionFileContent (shared_ptr<const Film> film, boost::filesystem::path path)
: Content (film, path)
{
- caption.reset (new CaptionContent (this));
+ caption.push_back (shared_ptr<CaptionContent> (new CaptionContent (this)));
}
TextCaptionFileContent::TextCaptionFileContent (shared_ptr<const Film> film, cxml::ConstNodePtr node, int version)
TextCaptionFile s (shared_from_this ());
/* Default to turning these subtitles on */
- caption->set_use (true);
+ only_caption()->set_use (true);
boost::mutex::scoped_lock lm (_mutex);
_length = s.length ();
- caption->add_font (shared_ptr<Font> (new Font (TEXT_FONT_ID)));
+ only_caption()->add_font (shared_ptr<Font> (new Font (TEXT_FONT_ID)));
}
string
node->add_child("Type")->add_child_text ("TextSubtitle");
Content::as_xml (node, with_paths);
- if (caption) {
- caption->as_xml (node);
+ if (only_caption()) {
+ only_caption()->as_xml (node);
}
node->add_child("Length")->add_child_text (raw_convert<string> (_length.get ()));
if (!_subtitles.empty()) {
first = content_time_period(_subtitles[0]).from;
}
- caption.reset (new CaptionDecoder (this, content->caption, log, first));
+ caption.push_back (shared_ptr<CaptionDecoder> (new CaptionDecoder (this, content->only_caption(), log, first)));
}
void
}
ContentTimePeriod const p = content_time_period (_subtitles[_next]);
- caption->emit_plain (p, _subtitles[_next]);
+ only_caption()->emit_plain (p, _subtitles[_next]);
++_next;
return false;
REELTYPE_BY_LENGTH
};
+/** Type of captions.
+ * For better or worse DoM uses two names for text that appears
+ * with the DCP:
+ *
+ * open captions: text that is shown to everybody on-screen (aka subtitles).
+ * closed captions: text that is shown to some viewers using some other method.
+ *
+ * There is also still use of the word `subtitle' in the code; these are the
+ * same as open captions in DoM.
+ */
enum CaptionType
{
CAPTION_OPEN,
{
DCPOMATIC_ASSERT (_clipboard);
- PasteDialog* d = new PasteDialog (this, static_cast<bool>(_clipboard->video), static_cast<bool>(_clipboard->audio), static_cast<bool>(_clipboard->caption));
+ PasteDialog* d = new PasteDialog (this, static_cast<bool>(_clipboard->video), static_cast<bool>(_clipboard->audio), !_clipboard->caption.empty());
if (d->ShowModal() == wxID_OK) {
BOOST_FOREACH (shared_ptr<Content> i, _film_editor->content_panel()->selected()) {
if (d->video() && i->video) {
DCPOMATIC_ASSERT (_clipboard->audio);
i->audio->take_settings_from (_clipboard->audio);
}
- if (d->caption() && i->caption) {
- DCPOMATIC_ASSERT (_clipboard->caption);
- i->caption->take_settings_from (_clipboard->caption);
+
+ if (d->caption()) {
+ list<shared_ptr<CaptionContent> >::iterator j = i->caption.begin ();
+ list<shared_ptr<CaptionContent> >::const_iterator k = _clipboard->caption.begin ();
+ while (j != i->caption.end() && k != _clipboard->caption.end()) {
+ (*j)->take_settings_from (*k);
+ ++j;
+ ++k;
+ }
}
}
}
void setup_from_dcp (shared_ptr<DCPContent> dcp)
{
- if (dcp->caption) {
- dcp->caption->set_use (true);
+ BOOST_FOREACH (shared_ptr<CaptionContent> i, dcp->caption) {
+ /* XXX: we should offer the option to view closed captions */
+ if (i->type() == CAPTION_OPEN) {
+ i->set_use (true);
+ }
}
if (dcp->video) {
int const CaptionAppearanceDialog::OUTLINE = 1;
int const CaptionAppearanceDialog::SHADOW = 2;
-CaptionAppearanceDialog::CaptionAppearanceDialog (wxWindow* parent, shared_ptr<Content> content)
+CaptionAppearanceDialog::CaptionAppearanceDialog (wxWindow* parent, shared_ptr<Content> content, shared_ptr<CaptionContent> caption)
: wxDialog (parent, wxID_ANY, _("Caption appearance"))
, _content (content)
+ , _caption (caption)
{
shared_ptr<FFmpegContent> ff = dynamic_pointer_cast<FFmpegContent> (content);
if (ff) {
_effect->Append (_("Outline"));
_effect->Append (_("Shadow"));;
- optional<dcp::Colour> colour = _content->caption->colour();
+ optional<dcp::Colour> colour = _caption->colour();
_force_colour->SetValue (static_cast<bool>(colour));
if (colour) {
_colour->SetColour (wxColour (colour->r, colour->g, colour->b));
_colour->SetColour (wxColour (255, 255, 255));
}
- optional<dcp::Effect> effect = _content->caption->effect();
+ optional<dcp::Effect> effect = _caption->effect();
_force_effect->SetValue (static_cast<bool>(effect));
if (effect) {
switch (*effect) {
_effect->SetSelection (NONE);
}
- optional<dcp::Colour> effect_colour = _content->caption->effect_colour();
+ optional<dcp::Colour> effect_colour = _caption->effect_colour();
_force_effect_colour->SetValue (static_cast<bool>(effect_colour));
if (effect_colour) {
_effect_colour->SetColour (wxColour (effect_colour->r, effect_colour->g, effect_colour->b));
_effect_colour->SetColour (wxColour (0, 0, 0));
}
- optional<ContentTime> fade_in = _content->caption->fade_in();
+ optional<ContentTime> fade_in = _caption->fade_in();
_force_fade_in->SetValue (static_cast<bool>(fade_in));
if (fade_in) {
_fade_in->set (*fade_in, _content->active_video_frame_rate());
_fade_in->set (ContentTime(), _content->active_video_frame_rate());
}
- optional<ContentTime> fade_out = _content->caption->fade_out();
+ optional<ContentTime> fade_out = _caption->fade_out();
_force_fade_out->SetValue (static_cast<bool>(fade_out));
if (fade_out) {
_fade_out->set (*fade_out, _content->active_video_frame_rate ());
_fade_out->set (ContentTime(), _content->active_video_frame_rate ());
}
- _outline_width->SetValue (_content->caption->outline_width ());
+ _outline_width->SetValue (_caption->outline_width ());
_force_colour->Bind (wxEVT_CHECKBOX, bind (&CaptionAppearanceDialog::setup_sensitivity, this));
_force_effect_colour->Bind (wxEVT_CHECKBOX, bind (&CaptionAppearanceDialog::setup_sensitivity, this));
{
if (_force_colour->GetValue ()) {
wxColour const c = _colour->GetColour ();
- _content->caption->set_colour (dcp::Colour (c.Red(), c.Green(), c.Blue()));
+ _caption->set_colour (dcp::Colour (c.Red(), c.Green(), c.Blue()));
} else {
- _content->caption->unset_colour ();
+ _caption->unset_colour ();
}
if (_force_effect->GetValue()) {
switch (_effect->GetSelection()) {
case NONE:
- _content->caption->set_effect (dcp::NONE);
+ _caption->set_effect (dcp::NONE);
break;
case OUTLINE:
- _content->caption->set_effect (dcp::BORDER);
+ _caption->set_effect (dcp::BORDER);
break;
case SHADOW:
- _content->caption->set_effect (dcp::SHADOW);
+ _caption->set_effect (dcp::SHADOW);
break;
}
} else {
- _content->caption->unset_effect ();
+ _caption->unset_effect ();
}
if (_force_effect_colour->GetValue ()) {
wxColour const ec = _effect_colour->GetColour ();
- _content->caption->set_effect_colour (dcp::Colour (ec.Red(), ec.Green(), ec.Blue()));
+ _caption->set_effect_colour (dcp::Colour (ec.Red(), ec.Green(), ec.Blue()));
} else {
- _content->caption->unset_effect_colour ();
+ _caption->unset_effect_colour ();
}
if (_force_fade_in->GetValue ()) {
- _content->caption->set_fade_in (_fade_in->get (_content->active_video_frame_rate ()));
+ _caption->set_fade_in (_fade_in->get (_content->active_video_frame_rate ()));
} else {
- _content->caption->unset_fade_in ();
+ _caption->unset_fade_in ();
}
if (_force_fade_out->GetValue ()) {
- _content->caption->set_fade_out (_fade_out->get (_content->active_video_frame_rate ()));
+ _caption->set_fade_out (_fade_out->get (_content->active_video_frame_rate ()));
} else {
- _content->caption->unset_fade_out ();
+ _caption->unset_fade_out ();
}
- _content->caption->set_outline_width (_outline_width->GetValue ());
+ _caption->set_outline_width (_outline_width->GetValue ());
if (_stream) {
for (map<RGBA, RGBAColourPicker*>::const_iterator i = _pickers.begin(); i != _pickers.end(); ++i) {
_fade_in->Enable (_force_fade_in->GetValue ());
_fade_out->Enable (_force_fade_out->GetValue ());
- bool const can_outline_width = _effect->GetSelection() == OUTLINE && _content->caption->burn ();
+ bool const can_outline_width = _effect->GetSelection() == OUTLINE && _caption->burn ();
_outline_width->Enable (can_outline_width);
if (can_outline_width) {
_outline_width->UnsetToolTip ();
class CaptionAppearanceDialog : public wxDialog
{
public:
- CaptionAppearanceDialog (wxWindow* parent, boost::shared_ptr<Content> content);
+ CaptionAppearanceDialog (wxWindow* parent, boost::shared_ptr<Content> content, boost::shared_ptr<CaptionContent> caption);
void apply ();
std::map<RGBA, RGBAColourPicker*> _pickers;
boost::shared_ptr<Content> _content;
+ boost::shared_ptr<CaptionContent> _caption;
boost::shared_ptr<FFmpegSubtitleStream> _stream;
boost::signals2::scoped_connection _content_connection;
: ContentSubPanel (p, _("Captions"))
, _caption_view (0)
, _fonts_dialog (0)
+ , _original_type (CAPTION_OPEN)
{
wxBoxSizer* reference_sizer = new wxBoxSizer (wxVERTICAL);
_reference->Bind (wxEVT_CHECKBOX, boost::bind (&CaptionPanel::reference_clicked, this));
_use->Bind (wxEVT_CHECKBOX, boost::bind (&CaptionPanel::use_toggled, this));
- _type->Bind (wxEVT_CHOICE, boost::bind (&CaptionPanel::type_changed, this));
+ _type->Bind (wxEVT_CHOICE, boost::bind (&CaptionPanel::type_changed, this));
_burn->Bind (wxEVT_CHECKBOX, boost::bind (&CaptionPanel::burn_toggled, this));
_x_offset->Bind (wxEVT_SPINCTRL, boost::bind (&CaptionPanel::x_offset_changed, this));
_y_offset->Bind (wxEVT_SPINCTRL, boost::bind (&CaptionPanel::y_offset_changed, this));
}
setup_sensitivity ();
} else if (property == CaptionContentProperty::USE) {
- checked_set (_use, scs ? scs->caption->use() : false);
+ checked_set (_use, scs ? scs->caption_of_original_type(_original_type)->use() : false);
setup_sensitivity ();
} else if (property == CaptionContentProperty::TYPE) {
if (scs) {
- switch (scs->caption->type()) {
+ switch (scs->caption_of_original_type(_original_type)->type()) {
case CAPTION_OPEN:
_type->SetSelection (0);
break;
}
setup_sensitivity ();
} else if (property == CaptionContentProperty::BURN) {
- checked_set (_burn, scs ? scs->caption->burn() : false);
+ checked_set (_burn, scs ? scs->caption_of_original_type(_original_type)->burn() : false);
} else if (property == CaptionContentProperty::X_OFFSET) {
- checked_set (_x_offset, scs ? lrint (scs->caption->x_offset() * 100) : 0);
+ checked_set (_x_offset, scs ? lrint (scs->caption_of_original_type(_original_type)->x_offset() * 100) : 0);
} else if (property == CaptionContentProperty::Y_OFFSET) {
- checked_set (_y_offset, scs ? lrint (scs->caption->y_offset() * 100) : 0);
+ checked_set (_y_offset, scs ? lrint (scs->caption_of_original_type(_original_type)->y_offset() * 100) : 0);
} else if (property == CaptionContentProperty::X_SCALE) {
- checked_set (_x_scale, scs ? lrint (scs->caption->x_scale() * 100) : 100);
+ checked_set (_x_scale, scs ? lrint (scs->caption_of_original_type(_original_type)->x_scale() * 100) : 100);
} else if (property == CaptionContentProperty::Y_SCALE) {
- checked_set (_y_scale, scs ? lrint (scs->caption->y_scale() * 100) : 100);
+ checked_set (_y_scale, scs ? lrint (scs->caption_of_original_type(_original_type)->y_scale() * 100) : 100);
} else if (property == CaptionContentProperty::LINE_SPACING) {
- checked_set (_line_spacing, scs ? lrint (scs->caption->line_spacing() * 100) : 100);
+ checked_set (_line_spacing, scs ? lrint (scs->caption_of_original_type(_original_type)->line_spacing() * 100) : 100);
} else if (property == CaptionContentProperty::LANGUAGE) {
- checked_set (_language, scs ? scs->caption->language() : "");
- } else if (property == DCPContentProperty::REFERENCE_SUBTITLE) {
+ checked_set (_language, scs ? scs->caption_of_original_type(_original_type)->language() : "");
+ } else if (property == DCPContentProperty::REFERENCE_CAPTION) {
if (scs) {
shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent> (scs);
- checked_set (_reference, dcp ? dcp->reference_subtitle () : false);
+ checked_set (_reference, dcp ? dcp->reference_caption(_original_type) : false);
} else {
checked_set (_reference, false);
}
setup_sensitivity ();
- } else if (property == DCPContentProperty::HAS_SUBTITLES) {
+ } else if (property == DCPContentProperty::CAPTIONS) {
setup_sensitivity ();
}
}
CaptionPanel::use_toggled ()
{
BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_caption()) {
- i->caption->set_use (_use->GetValue());
+ i->caption_of_original_type(_original_type)->set_use (_use->GetValue());
}
}
BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_caption()) {
switch (_type->GetSelection()) {
case 0:
- i->caption->set_type (CAPTION_OPEN);
+ i->caption_of_original_type(_original_type)->set_type (CAPTION_OPEN);
break;
case 1:
- i->caption->set_type (CAPTION_CLOSED);
+ i->caption_of_original_type(_original_type)->set_type (CAPTION_CLOSED);
break;
}
}
CaptionPanel::burn_toggled ()
{
BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_caption ()) {
- i->caption->set_burn (_burn->GetValue());
+ i->caption_of_original_type(_original_type)->set_burn (_burn->GetValue());
}
}
shared_ptr<const DCPContent> dc = boost::dynamic_pointer_cast<const DCPContent> (i);
shared_ptr<const DCPSubtitleContent> dsc = boost::dynamic_pointer_cast<const DCPSubtitleContent> (i);
if (fc) {
- if (fc->caption) {
+ if (!fc->caption.empty()) {
++ffmpeg_subs;
++any_subs;
}
}
string why_not;
- bool const can_reference = dcp && dcp->can_reference_subtitle (why_not);
+ bool const can_reference = dcp && dcp->can_reference_caption (_original_type, why_not);
setup_refer_button (_reference, _reference_note, dcp, can_reference, why_not);
bool const reference = _reference->GetValue ();
CaptionPanel::x_offset_changed ()
{
BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_caption ()) {
- i->caption->set_x_offset (_x_offset->GetValue() / 100.0);
+ i->caption_of_original_type(_original_type)->set_x_offset (_x_offset->GetValue() / 100.0);
}
}
CaptionPanel::y_offset_changed ()
{
BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_caption ()) {
- i->caption->set_y_offset (_y_offset->GetValue() / 100.0);
+ i->caption_of_original_type(_original_type)->set_y_offset (_y_offset->GetValue() / 100.0);
}
}
{
ContentList c = _parent->selected_caption ();
if (c.size() == 1) {
- c.front()->caption->set_x_scale (_x_scale->GetValue() / 100.0);
+ c.front()->caption_of_original_type(_original_type)->set_x_scale (_x_scale->GetValue() / 100.0);
}
}
CaptionPanel::y_scale_changed ()
{
BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_caption ()) {
- i->caption->set_y_scale (_y_scale->GetValue() / 100.0);
+ i->caption_of_original_type(_original_type)->set_y_scale (_y_scale->GetValue() / 100.0);
}
}
CaptionPanel::line_spacing_changed ()
{
BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_caption ()) {
- i->caption->set_line_spacing (_line_spacing->GetValue() / 100.0);
+ i->caption_of_original_type(_original_type)->set_line_spacing (_line_spacing->GetValue() / 100.0);
}
}
CaptionPanel::language_changed ()
{
BOOST_FOREACH (shared_ptr<Content> i, _parent->selected_caption ()) {
- i->caption->set_language (wx_to_std (_language->GetValue()));
+ i->caption_of_original_type(_original_type)->set_language (wx_to_std (_language->GetValue()));
}
}
film_content_changed (CaptionContentProperty::LANGUAGE);
film_content_changed (CaptionContentProperty::FONTS);
film_content_changed (CaptionContentProperty::TYPE);
- film_content_changed (DCPContentProperty::REFERENCE_SUBTITLE);
+ film_content_changed (DCPContentProperty::REFERENCE_CAPTION);
}
void
shared_ptr<Decoder> decoder = decoder_factory (c.front(), _parent->film()->log(), false);
if (decoder) {
- _caption_view = new CaptionView (this, _parent->film(), c.front(), decoder, _parent->film_viewer());
+ _caption_view = new CaptionView (this, _parent->film(), c.front(), c.front()->caption_of_original_type(_original_type), decoder, _parent->film_viewer());
_caption_view->Show ();
}
}
ContentList c = _parent->selected_caption ();
DCPOMATIC_ASSERT (c.size() == 1);
- _fonts_dialog = new FontsDialog (this, c.front ());
+ _fonts_dialog = new FontsDialog (this, c.front(), c.front()->caption_of_original_type(_original_type));
_fonts_dialog->Show ();
}
return;
}
- d->set_reference_subtitle (_reference->GetValue ());
+ d->set_reference_caption (_original_type, _reference->GetValue ());
}
void
ContentList c = _parent->selected_caption ();
DCPOMATIC_ASSERT (c.size() == 1);
- CaptionAppearanceDialog* d = new CaptionAppearanceDialog (this, c.front());
+ CaptionAppearanceDialog* d = new CaptionAppearanceDialog (this, c.front(), c.front()->caption_of_original_type(_original_type));
if (d->ShowModal () == wxID_OK) {
d->apply ();
}
wxButton* _fonts_dialog_button;
FontsDialog* _fonts_dialog;
wxButton* _appearance_dialog_button;
+ CaptionType _original_type;
};
using boost::bind;
using boost::dynamic_pointer_cast;
-CaptionView::CaptionView (wxWindow* parent, shared_ptr<Film> film, shared_ptr<Content> content, shared_ptr<Decoder> decoder, FilmViewer* viewer)
+CaptionView::CaptionView (wxWindow* parent, shared_ptr<Film> film, shared_ptr<Content> content, shared_ptr<CaptionContent> caption, shared_ptr<Decoder> decoder, FilmViewer* viewer)
: wxDialog (parent, wxID_ANY, _("Captions"), wxDefaultPosition, wxDefaultSize, wxDEFAULT_DIALOG_STYLE | wxRESIZE_BORDER)
, _content (content)
, _film_viewer (viewer)
_subs = 0;
_frc = film->active_frame_rate_change (content->position());
- decoder->caption->PlainStart.connect (bind (&CaptionView::data_start, this, _1));
- decoder->caption->Stop.connect (bind (&CaptionView::data_stop, this, _1));
+
+ /* Find the decoder that is being used for our CaptionContent and attach to it */
+ BOOST_FOREACH (shared_ptr<CaptionDecoder> i, decoder->caption) {
+ if (i->content() == caption) {
+ i->PlainStart.connect (bind (&CaptionView::data_start, this, _1));
+ i->Stop.connect (bind (&CaptionView::data_stop, this, _1));
+ }
+ }
while (!decoder->pass ()) {}
SetSizerAndFit (sizer);
}
class CaptionView : public wxDialog
{
public:
- CaptionView (wxWindow *, boost::shared_ptr<Film>, boost::shared_ptr<Content> content, boost::shared_ptr<Decoder>, FilmViewer* viewer);
+ CaptionView (
+ wxWindow *, boost::shared_ptr<Film>, boost::shared_ptr<Content> content, boost::shared_ptr<CaptionContent> caption, boost::shared_ptr<Decoder>, FilmViewer* viewer
+ );
private:
void data_start (ContentTextCaption cts);
ContentList sc;
BOOST_FOREACH (shared_ptr<Content> i, selected ()) {
- if (i->caption) {
+ if (!i->caption.empty()) {
sc.push_back (i);
}
}
_video_panel->Enable (_generally_sensitive && video_selection.size() > 0);
_audio_panel->Enable (_generally_sensitive && audio_selection.size() > 0);
- _caption_panel->Enable (_generally_sensitive && selection.size() == 1 && selection.front()->caption);
+ _caption_panel->Enable (_generally_sensitive && selection.size() == 1 && !selection.front()->caption.empty());
_timing_panel->Enable (_generally_sensitive);
}
property == VideoContentProperty::SCALE ||
property == DCPContentProperty::REFERENCE_VIDEO ||
property == DCPContentProperty::REFERENCE_AUDIO ||
- property == DCPContentProperty::REFERENCE_SUBTITLE) {
+ property == DCPContentProperty::REFERENCE_CAPTION) {
setup_dcp_name ();
setup_sensitivity ();
}
using std::cout;
using boost::shared_ptr;
-FontsDialog::FontsDialog (wxWindow* parent, shared_ptr<Content> content)
+FontsDialog::FontsDialog (wxWindow* parent, shared_ptr<Content> content, shared_ptr<CaptionContent> caption)
: wxDialog (parent, wxID_ANY, _("Fonts"))
, _content (content)
+ , _caption (caption)
{
_fonts = new wxListCtrl (this, wxID_ANY, wxDefaultPosition, wxSize (550, 200), wxLC_REPORT | wxLC_SINGLE_SEL);
FontsDialog::setup ()
{
shared_ptr<Content> content = _content.lock ();
- if (!content) {
+ shared_ptr<CaptionContent> caption = _caption.lock ();
+ if (!content || !caption) {
return;
}
_fonts->DeleteAllItems ();
size_t n = 0;
- BOOST_FOREACH (shared_ptr<Font> i, content->caption->fonts ()) {
+ BOOST_FOREACH (shared_ptr<Font> i, caption->fonts ()) {
wxListItem item;
item.SetId (n);
_fonts->InsertItem (item);
FontsDialog::edit_clicked ()
{
shared_ptr<Content> content = _content.lock ();
- if (!content) {
+ shared_ptr<CaptionContent> caption = _caption.lock ();
+ if (!content || !caption) {
return;
}
int const item = _fonts->GetNextItem (-1, wxLIST_NEXT_ALL, wxLIST_STATE_SELECTED);
string const id = wx_to_std (_fonts->GetItemText (item, 0));
shared_ptr<Font> font;
- BOOST_FOREACH (shared_ptr<Font> i, content->caption->fonts()) {
+ BOOST_FOREACH (shared_ptr<Font> i, caption->fonts()) {
if (i->id() == id) {
font = i;
}
/*
- Copyright (C) 2014-2016 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2014-2018 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
#include <boost/filesystem.hpp>
class Content;
+class CaptionContent;
class FontsDialog : public wxDialog
{
public:
- FontsDialog (wxWindow* parent, boost::shared_ptr<Content>);
+ FontsDialog (wxWindow* parent, boost::shared_ptr<Content>, boost::shared_ptr<CaptionContent> caption);
private:
void setup ();
void edit_clicked ();
boost::weak_ptr<Content> _content;
+ boost::weak_ptr<CaptionContent> _caption;
wxListCtrl* _fonts;
wxButton* _edit;
};
if (dcp->audio && !dcp->audio->streams().empty()) {
checked_set (_dcp[r++], wxString::Format(_("Audio channels: %d"), dcp->audio->streams().front()->channels()));
}
- if (dcp->caption) {
+ if (!dcp->caption.empty()) {
checked_set (_dcp[r++], _("Subtitles: yes"));
} else {
checked_set (_dcp[r++], _("Subtitles: no"));
_views.push_back (shared_ptr<TimelineView> (new TimelineAudioContentView (*this, i)));
}
- if (i->caption) {
- _views.push_back (shared_ptr<TimelineView> (new TimelineTextContentView (*this, i)));
+ BOOST_FOREACH (shared_ptr<CaptionContent> j, i->caption) {
+ _views.push_back (shared_ptr<TimelineView> (new TimelineTextContentView (*this, i, j)));
}
if (dynamic_pointer_cast<AtmosMXFContent> (i)) {
/* Tracks are:
Video (mono or left-eye)
Video (right-eye)
- Subtitle 1
- Subtitle 2
- Subtitle N
+ Caption 1
+ Caption 2
+ Caption N
Atmos
Audio 1
Audio 2
_tracks = max (_tracks, 1);
- /* Subtitle */
+ /* Captions */
- int const subtitle_tracks = place<TimelineTextContentView> (_views, _tracks);
+ int const caption_tracks = place<TimelineTextContentView> (_views, _tracks);
/* Atmos */
_labels_view->set_3d (have_3d);
_labels_view->set_audio_tracks (audio_tracks);
- _labels_view->set_subtitle_tracks (subtitle_tracks);
+ _labels_view->set_caption_tracks (caption_tracks);
_labels_view->set_atmos (have_atmos);
_time_axis_view->set_y (tracks());
: TimelineView (tl)
, _threed (true)
, _audio_tracks (0)
- , _subtitle_tracks (0)
+ , _caption_tracks (0)
, _atmos (true)
{
wxString labels[] = {
_("Video"),
_("Audio"),
- _("Subtitles"),
+ _("Captions"),
_("Atmos")
};
gc->DrawText (_("Video"), 0, (ty + fy) / 2 - 8);
fy = ty;
- if (_subtitle_tracks) {
- ty = fy + _subtitle_tracks * h;
- gc->DrawText (_("Subtitles"), 0, (ty + fy) / 2 - 8);
+ if (_caption_tracks) {
+ ty = fy + _caption_tracks * h;
+ gc->DrawText (_("Captions"), 0, (ty + fy) / 2 - 8);
fy = ty;
}
}
void
-TimelineLabelsView::set_subtitle_tracks (int n)
+TimelineLabelsView::set_caption_tracks (int n)
{
- _subtitle_tracks = n;
+ _caption_tracks = n;
}
void
void set_3d (bool s);
void set_audio_tracks (int n);
- void set_subtitle_tracks (int n);
+ void set_caption_tracks (int n);
void set_atmos (bool s);
private:
int _width;
bool _threed;
int _audio_tracks;
- int _subtitle_tracks;
+ int _caption_tracks;
bool _atmos;
};
using boost::shared_ptr;
-TimelineTextContentView::TimelineTextContentView (Timeline& tl, shared_ptr<Content> c)
+TimelineTextContentView::TimelineTextContentView (Timeline& tl, shared_ptr<Content> c, shared_ptr<CaptionContent> caption)
: TimelineContentView (tl, c)
+ , _caption (caption)
{
}
{
shared_ptr<Content> c = _content.lock ();
DCPOMATIC_ASSERT (c);
- return c->caption && c->caption->use();
+ return _caption->use();
}
#include "timeline_content_view.h"
class TextContent;
+class CaptionContent;
/** @class TimelineTextContentView
* @brief Timeline view for TextContent.
class TimelineTextContentView : public TimelineContentView
{
public:
- TimelineTextContentView (Timeline& tl, boost::shared_ptr<Content> c);
+ TimelineTextContentView (Timeline& tl, boost::shared_ptr<Content>, boost::shared_ptr<CaptionContent>);
private:
bool active () const;
wxColour background_colour () const;
wxColour foreground_colour () const;
+
+ boost::shared_ptr<CaptionContent> _caption;
};
++count_ac;
content = i;
}
- if (i->caption && i->video_frame_rate()) {
+ if (!i->caption.empty() && i->video_frame_rate()) {
++count_sc;
content = i;
}
film->examine_and_add_content (content);
BOOST_REQUIRE (!wait_for_jobs ());
- content->caption->set_type (CAPTION_CLOSED);
+ content->only_caption()->set_type (CAPTION_CLOSED);
film->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs ());
BOOST_CHECK_EQUAL (content->full_length().get(), DCPTime::from_seconds(2).get());
- content->caption->set_use (true);
- content->caption->set_burn (false);
+ content->only_caption()->set_use (true);
+ content->only_caption()->set_burn (false);
film->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs ());
BOOST_REQUIRE (!wait_for_jobs ());
shared_ptr<DCPDecoder> decoder (new DCPDecoder (content, film->log(), false));
- decoder->caption->PlainStart.connect (bind (store, _1));
+ decoder->only_caption()->PlainStart.connect (bind (store, _1));
stored = optional<ContentTextCaption> ();
while (!decoder->pass() && !stored) {}
BOOST_REQUIRE (!wait_for_jobs ());
shared_ptr<DCPSubtitleDecoder> decoder (new DCPSubtitleDecoder (content, film->log()));
- decoder->caption->PlainStart.connect (bind (store, _1));
+ decoder->only_caption()->PlainStart.connect (bind (store, _1));
stored = optional<ContentTextCaption> ();
while (!decoder->pass ()) {
shared_ptr<DCPSubtitleDecoder> decoder (new DCPSubtitleDecoder (content, film->log()));
stored = optional<ContentTextCaption> ();
while (!decoder->pass ()) {
- decoder->caption->PlainStart.connect (bind (store, _1));
+ decoder->only_caption()->PlainStart.connect (bind (store, _1));
if (stored && stored->from() == ContentTime::from_seconds(0.08)) {
list<dcp::SubtitleString> s = stored->subs;
list<dcp::SubtitleString>::const_iterator i = s.begin ();
film->examine_and_add_content (content2);
BOOST_REQUIRE (!wait_for_jobs ());
- content->caption->add_font (shared_ptr<Font> (new Font ("font1")));
- content2->caption->add_font (shared_ptr<Font> (new Font ("font2")));
+ content->only_caption()->add_font (shared_ptr<Font> (new Font ("font1")));
+ content2->only_caption()->add_font (shared_ptr<Font> (new Font ("font2")));
film->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs ());
shared_ptr<TextCaptionFileContent> s (new TextCaptionFileContent (film, "test/data/subrip2.srt"));
film->examine_and_add_content (s);
BOOST_REQUIRE (!wait_for_jobs ());
- s->caption->set_colour (dcp::Colour (255, 255, 0));
- s->caption->set_effect (dcp::SHADOW);
- s->caption->set_effect_colour (dcp::Colour (0, 255, 255));
+ s->only_caption()->set_colour (dcp::Colour (255, 255, 0));
+ s->only_caption()->set_effect (dcp::SHADOW);
+ s->only_caption()->set_effect_colour (dcp::Colour (0, 255, 255));
film->write_metadata();
shared_ptr<Job> job (new TranscodeJob (film));
shared_ptr<TextCaptionFileContent> s (new TextCaptionFileContent (film, "test/data/subrip.srt"));
film->examine_and_add_content (s);
BOOST_REQUIRE (!wait_for_jobs ());
- s->caption->set_colour (dcp::Colour (255, 255, 0));
- s->caption->set_effect (dcp::SHADOW);
- s->caption->set_effect_colour (dcp::Colour (0, 255, 255));
+ s->only_caption()->set_colour (dcp::Colour (255, 255, 0));
+ s->only_caption()->set_effect (dcp::SHADOW);
+ s->only_caption()->set_effect_colour (dcp::Colour (0, 255, 255));
shared_ptr<Job> job (new TranscodeJob (film));
FFmpegEncoder encoder (film, job, "build/test/ffmpeg_encoder_prores_test7.mov", FFmpegEncoder::FORMAT_PRORES, false);
shared_ptr<TextCaptionFileContent> s (new TextCaptionFileContent (film, "test/data/subrip2.srt"));
film->examine_and_add_content (s);
BOOST_REQUIRE (!wait_for_jobs ());
- s->caption->set_colour (dcp::Colour (255, 255, 0));
- s->caption->set_effect (dcp::SHADOW);
- s->caption->set_effect_colour (dcp::Colour (0, 255, 255));
+ s->only_caption()->set_colour (dcp::Colour (255, 255, 0));
+ s->only_caption()->set_effect (dcp::SHADOW);
+ s->only_caption()->set_effect_colour (dcp::Colour (0, 255, 255));
film->write_metadata();
shared_ptr<Job> job (new TranscodeJob (film));
shared_ptr<TextCaptionFileContent> s (new TextCaptionFileContent (film, "test/data/subrip.srt"));
film->examine_and_add_content (s);
BOOST_REQUIRE (!wait_for_jobs ());
- s->caption->set_colour (dcp::Colour (255, 255, 0));
- s->caption->set_effect (dcp::SHADOW);
- s->caption->set_effect_colour (dcp::Colour (0, 255, 255));
+ s->only_caption()->set_colour (dcp::Colour (255, 255, 0));
+ s->only_caption()->set_effect (dcp::SHADOW);
+ s->only_caption()->set_effect_colour (dcp::Colour (0, 255, 255));
film->write_metadata();
shared_ptr<Job> job (new TranscodeJob (film));
shared_ptr<DCPContent> dcp (new DCPContent (film, private_data / "awkward_subs"));
film->examine_and_add_content (dcp, true);
BOOST_REQUIRE (!wait_for_jobs ());
- dcp->caption->set_use (true);
+ dcp->only_caption()->set_use (true);
shared_ptr<Player> player (new Player (film, film->playlist()));
player->set_fast ();
shared_ptr<DCPContent> dcp (new DCPContent (film, private_data / "awkward_subs2"));
film->examine_and_add_content (dcp, true);
BOOST_REQUIRE (!wait_for_jobs ());
- dcp->caption->set_use (true);
+ dcp->only_caption()->set_use (true);
shared_ptr<Player> player (new Player (film, film->playlist()));
player->set_fast ();
shared_ptr<FFmpegContent> content = dynamic_pointer_cast<FFmpegContent>(content_factory(film, private_data / "prophet_short_clip.mkv").front());
film->examine_and_add_content (content);
BOOST_REQUIRE (!wait_for_jobs ());
- content->caption->set_burn (true);
- content->caption->set_use (true);
+ content->only_caption()->set_burn (true);
+ content->only_caption()->set_use (true);
film->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs ());
boost::filesystem::remove_all (film->dir (film->dcp_name(), false));
- content->caption->set_use (false);
+ content->only_caption()->set_use (false);
film->make_dcp ();
BOOST_REQUIRE (!wait_for_jobs ());
film->examine_and_add_content (content);
wait_for_jobs ();
- content->caption->set_use (true);
- content->caption->set_burn (false);
+ content->only_caption()->set_use (true);
+ content->only_caption()->set_burn (false);
film->make_dcp ();
wait_for_jobs ();
film->examine_and_add_content (content);
wait_for_jobs ();
- content->caption->set_use (true);
- content->caption->set_burn (false);
+ content->only_caption()->set_use (true);
+ content->only_caption()->set_burn (false);
/* Use test/data/subrip2.srt as if it were a font file */
- content->caption->fonts().front()->set_file (FontFiles::NORMAL, "test/data/subrip2.srt");
+ content->only_caption()->fonts().front()->set_file (FontFiles::NORMAL, "test/data/subrip2.srt");
film->make_dcp ();
wait_for_jobs ();
film->examine_and_add_content (content);
wait_for_jobs ();
- content->caption->set_use (true);
- content->caption->set_burn (false);
+ content->only_caption()->set_use (true);
+ content->only_caption()->set_burn (false);
film->make_dcp ();
wait_for_jobs ();
film->set_name ("frobozz");
film->set_interop (false);
shared_ptr<TextCaptionFileContent> content (new TextCaptionFileContent (film, "test/data/subrip2.srt"));
- content->caption->set_use (true);
- content->caption->set_burn (false);
+ content->only_caption()->set_use (true);
+ content->only_caption()->set_burn (false);
film->examine_and_add_content (content);
wait_for_jobs ();
film->make_dcp ();
film->set_interop (true);
film->set_sequence (false);
shared_ptr<TextCaptionFileContent> content (new TextCaptionFileContent (film, "test/data/subrip2.srt"));
- content->caption->set_use (true);
- content->caption->set_burn (false);
+ content->only_caption()->set_use (true);
+ content->only_caption()->set_burn (false);
film->examine_and_add_content (content);
film->examine_and_add_content (content);
wait_for_jobs ();
shared_ptr<Film> film = new_test_film2 ("srt_subtitle_test6");
film->set_interop (false);
shared_ptr<TextCaptionFileContent> content (new TextCaptionFileContent (film, "test/data/frames.srt"));
- content->caption->set_use (true);
- content->caption->set_burn (false);
+ content->only_caption()->set_use (true);
+ content->only_caption()->set_burn (false);
film->examine_and_add_content (content);
BOOST_REQUIRE (!wait_for_jobs ());
film->make_dcp ();
film->examine_and_add_content (content);
wait_for_jobs ();
- content->caption->set_use (true);
- content->caption->set_burn (false);
+ content->only_caption()->set_use (true);
+ content->only_caption()->set_burn (false);
film->make_dcp ();
wait_for_jobs ();
shared_ptr<TextCaptionFileContent> content (new TextCaptionFileContent (film, "test/data/subrip5.srt"));
film->examine_and_add_content (content);
BOOST_REQUIRE (!wait_for_jobs ());
- content->caption->set_use (true);
- content->caption->set_burn (false);
+ content->only_caption()->set_use (true);
+ content->only_caption()->set_burn (false);
film->set_reel_type (REELTYPE_BY_LENGTH);
film->set_interop (true);
film->set_reel_length (1024 * 1024 * 512);
string why_not;
BOOST_CHECK (!dcp->can_reference_video(why_not));
BOOST_CHECK (!dcp->can_reference_audio(why_not));
- BOOST_CHECK (!dcp->can_reference_subtitle(why_not));
+ BOOST_CHECK (!dcp->can_reference_caption(CAPTION_OPEN, why_not));
+ BOOST_CHECK (!dcp->can_reference_caption(CAPTION_CLOSED, why_not));
/* Multi-reel DCP can be referenced if we are using by-video-content */
film->set_reel_type (REELTYPE_BY_VIDEO_CONTENT);
BOOST_CHECK (dcp->can_reference_video(why_not));
BOOST_CHECK (dcp->can_reference_audio(why_not));
- /* (but reels_test2 has no subtitles to reference) */
- BOOST_CHECK (!dcp->can_reference_subtitle(why_not));
+ /* (but reels_test2 has no captions to reference) */
+ BOOST_CHECK (!dcp->can_reference_caption(CAPTION_OPEN, why_not));
+ BOOST_CHECK (!dcp->can_reference_caption(CAPTION_CLOSED, why_not));
shared_ptr<FFmpegContent> other (new FFmpegContent (film, "test/data/test.mp4"));
film->examine_and_add_content (other);
other->set_position (DCPTime (0));
BOOST_CHECK (!dcp->can_reference_video(why_not));
BOOST_CHECK (!dcp->can_reference_audio(why_not));
- BOOST_CHECK (!dcp->can_reference_subtitle(why_not));
+ BOOST_CHECK (!dcp->can_reference_caption(CAPTION_OPEN, why_not));
+ BOOST_CHECK (!dcp->can_reference_caption(CAPTION_CLOSED, why_not));
/* This should not be considered an overlap */
other->set_position (dcp->end ());
BOOST_CHECK (dcp->can_reference_video(why_not));
BOOST_CHECK (dcp->can_reference_audio(why_not));
- /* (reels_test2 has no subtitles to reference) */
- BOOST_CHECK (!dcp->can_reference_subtitle(why_not));
+ /* (reels_test2 has no captions to reference) */
+ BOOST_CHECK (!dcp->can_reference_caption(CAPTION_OPEN, why_not));
+ BOOST_CHECK (!dcp->can_reference_caption(CAPTION_CLOSED, why_not));
}
/** Make a OV with video and audio and a VF referencing the OV and adding subs */