class ContentText
{
public:
- explicit ContentText (ContentTime f, TextType t)
+ explicit ContentText (ContentTime f)
: _from (f)
- , _type (t)
{}
ContentTime from () const {
return _from;
}
- TextType type () const {
- return _type;
- }
-
private:
ContentTime _from;
- TextType _type;
};
class ContentBitmapText : public ContentText
{
public:
- ContentBitmapText (ContentTime f, TextType type, boost::shared_ptr<Image> im, dcpomatic::Rect<double> r)
- : ContentText (f, type)
+ ContentBitmapText (ContentTime f, boost::shared_ptr<Image> im, dcpomatic::Rect<double> r)
+ : ContentText (f)
, sub (im, r)
{}
class ContentStringText : public ContentText
{
public:
- ContentStringText (ContentTime f, TextType type, std::list<dcp::SubtitleString> s)
- : ContentText (f, type)
+ ContentStringText (ContentTime f, std::list<dcp::SubtitleString> s)
+ : ContentText (f)
, subs (s)
{}
bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
);
(*j)->Stop.connect (
- bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1, _2)
+ bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
);
++j;
ps.bitmap.push_back (subtitle.sub);
DCPTime from (content_time_to_dcp (piece, subtitle.from()));
- _active_texts[subtitle.type()].add_from (wc, ps, from);
+ _active_texts[text->type()].add_from (wc, ps, from);
}
void
ps.add_fonts (text->fonts ());
}
- _active_texts[subtitle.type()].add_from (wc, ps, from);
+ _active_texts[text->type()].add_from (wc, ps, from);
}
void
-Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
+Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
{
- if (!_active_texts[type].have (wc)) {
+ shared_ptr<const TextContent> text = wc.lock ();
+ if (!text) {
+ return;
+ }
+
+ if (!_active_texts[text->type()].have(wc)) {
return;
}
shared_ptr<Piece> piece = wp.lock ();
- shared_ptr<const TextContent> text = wc.lock ();
- if (!piece || !text) {
+ if (!piece) {
return;
}
return;
}
- pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
+ pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
- bool const always = (type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
+ bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
if (text->use() && !always && !text->burn()) {
- Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
+ Text (from.first, text->type(), DCPTimePeriod (from.second, dcp_to));
}
}
void audio (boost::weak_ptr<Piece>, AudioStreamPtr, ContentAudio);
void bitmap_text_start (boost::weak_ptr<Piece>, boost::weak_ptr<const TextContent>, ContentBitmapText);
void plain_text_start (boost::weak_ptr<Piece>, boost::weak_ptr<const TextContent>, ContentStringText);
- void subtitle_stop (boost::weak_ptr<Piece>, boost::weak_ptr<const TextContent>, ContentTime, TextType);
+ void subtitle_stop (boost::weak_ptr<Piece>, boost::weak_ptr<const TextContent>, ContentTime);
DCPTime one_video_frame () const;
void fill_audio (DCPTimePeriod period);
std::pair<boost::shared_ptr<AudioBuffers>, DCPTime> discard_audio (
template <class T>
void
-maybe_add_captions (
+maybe_add_text (
shared_ptr<dcp::SubtitleAsset> asset,
int64_t picture_duration,
shared_ptr<dcp::Reel> reel,
}
reel->add (reel_sound_asset);
- maybe_add_captions<dcp::ReelSubtitleAsset> (_caption_asset[TEXT_OPEN_SUBTITLE], reel_picture_asset->duration(), reel, refs, fonts, _film, _period);
- maybe_add_captions<dcp::ReelClosedCaptionAsset> (_caption_asset[TEXT_CLOSED_CAPTION], reel_picture_asset->duration(), reel, refs, fonts, _film, _period);
+ maybe_add_text<dcp::ReelSubtitleAsset> (_text_asset[TEXT_OPEN_SUBTITLE], reel_picture_asset->duration(), reel, refs, fonts, _film, _period);
+ maybe_add_text<dcp::ReelClosedCaptionAsset> (_text_asset[TEXT_CLOSED_CAPTION], reel_picture_asset->duration(), reel, refs, fonts, _film, _period);
return reel;
}
void
ReelWriter::write (PlayerText subs, TextType type, DCPTimePeriod period)
{
- if (!_caption_asset[type]) {
+ if (!_text_asset[type]) {
string lang = _film->subtitle_language ();
if (lang.empty ()) {
lang = "Unknown";
s->set_movie_title (_film->name ());
s->set_language (lang);
s->set_reel_number (raw_convert<string> (_reel_index + 1));
- _caption_asset[type] = s;
+ _text_asset[type] = s;
} else {
shared_ptr<dcp::SMPTESubtitleAsset> s (new dcp::SMPTESubtitleAsset ());
s->set_content_title_text (_film->name ());
if (_film->encrypted ()) {
s->set_key (_film->key ());
}
- _caption_asset[type] = s;
+ _text_asset[type] = s;
}
}
/* XXX: couldn't / shouldn't we use period here rather than getting time from the subtitle? */
i.set_in (i.in() - dcp::Time (_period.from.seconds(), i.in().tcr));
i.set_out (i.out() - dcp::Time (_period.from.seconds(), i.out().tcr));
- _caption_asset[type]->add (shared_ptr<dcp::Subtitle>(new dcp::SubtitleString(i)));
+ _text_asset[type]->add (shared_ptr<dcp::Subtitle>(new dcp::SubtitleString(i)));
}
BOOST_FOREACH (BitmapText i, subs.bitmap) {
- _caption_asset[type]->add (
+ _text_asset[type]->add (
shared_ptr<dcp::Subtitle>(
new dcp::SubtitleImage(
i.image->as_png(),
boost::shared_ptr<dcp::PictureAssetWriter> _picture_asset_writer;
boost::shared_ptr<dcp::SoundAsset> _sound_asset;
boost::shared_ptr<dcp::SoundAssetWriter> _sound_asset_writer;
- boost::shared_ptr<dcp::SubtitleAsset> _caption_asset[TEXT_COUNT];
+ boost::shared_ptr<dcp::SubtitleAsset> _text_asset[TEXT_COUNT];
static int const _info_size;
};
void
TextDecoder::emit_bitmap_start (ContentTime from, shared_ptr<Image> image, dcpomatic::Rect<double> rect)
{
- BitmapStart (ContentBitmapText (from, _content->type(), image, rect));
+ BitmapStart (ContentBitmapText (from, image, rect));
_position = from;
}
}
}
- PlainStart (ContentStringText (from, _content->type(), s));
+ PlainStart (ContentStringText (from, s));
_position = from;
}
void
TextDecoder::emit_stop (ContentTime to)
{
- Stop (to, _content->type());
+ Stop (to);
}
void
boost::signals2::signal<void (ContentBitmapText)> BitmapStart;
boost::signals2::signal<void (ContentStringText)> PlainStart;
- boost::signals2::signal<void (ContentTime, TextType)> Stop;
+ boost::signals2::signal<void (ContentTime)> Stop;
private:
boost::shared_ptr<const TextContent> _content;
*/
_audio_reel = _reels.begin ();
for (int i = 0; i < TEXT_COUNT; ++i) {
- _caption_reel[i] = _reels.begin ();
+ _text_reel[i] = _reels.begin ();
}
/* Check that the signer is OK if we need one */
void
Writer::write (PlayerText text, TextType type, DCPTimePeriod period)
{
- while (_caption_reel[type]->period().to <= period.from) {
- ++_caption_reel[type];
- DCPOMATIC_ASSERT (_caption_reel[type] != _reels.end());
+ while (_text_reel[type]->period().to <= period.from) {
+ ++_text_reel[type];
+ DCPOMATIC_ASSERT (_text_reel[type] != _reels.end());
}
- DCPOMATIC_ASSERT (_caption_reel[type] != _reels.end());
+ DCPOMATIC_ASSERT (_text_reel[type] != _reels.end());
- _caption_reel[type]->write (text, type, period);
+ _text_reel[type]->write (text, type, period);
}
void
boost::weak_ptr<Job> _job;
std::vector<ReelWriter> _reels;
std::vector<ReelWriter>::iterator _audio_reel;
- std::vector<ReelWriter>::iterator _caption_reel[TEXT_COUNT];
+ std::vector<ReelWriter>::iterator _text_reel[TEXT_COUNT];
/** our thread, or 0 */
boost::thread* _thread;
++n;
}
- if (many) {
+ if (!selected || many) {
_dcp_track->SetSelection (wxNOT_FOUND);
}
}
_type->SetSelection (0);
}
setup_sensitivity ();
+ update_dcp_track_selection ();
} else if (property == TextContentProperty::BURN) {
checked_set (_burn, text ? text->burn() : false);
} else if (property == TextContentProperty::X_OFFSET) {
_y_scale->Enable (!reference && any_subs > 0 && use && type == TEXT_OPEN_SUBTITLE);
_line_spacing->Enable (!reference && use && type == TEXT_OPEN_SUBTITLE);
_dcp_track->Enable (!reference && any_subs > 0 && use && type == TEXT_CLOSED_CAPTION);
- _language->Enable (!reference && any_subs > 0 && use);
+ _language->Enable (!reference && any_subs > 0 && use && type == TEXT_OPEN_SUBTITLE);
_stream->Enable (!reference && ffmpeg_subs == 1);
_text_view_button->Enable (!reference);
_fonts_dialog_button->Enable (!reference && type == TEXT_OPEN_SUBTITLE);
BOOST_REQUIRE_EQUAL (check.cpls().front()->reels().size(), 1);
BOOST_REQUIRE (!check.cpls().front()->reels().front()->closed_captions().empty());
}
+
+/** Test multiple closed captions: three sources, each assigned to its own
+ *  DCP text track, should produce one CPL/reel carrying three separate
+ *  closed-caption assets.
+ */
+BOOST_AUTO_TEST_CASE (closed_caption_test2)
+{
+	shared_ptr<Film> film = new_test_film2 ("closed_caption_test2");
+	shared_ptr<StringTextFileContent> content1 (new StringTextFileContent (film, "test/data/subrip.srt"));
+	film->examine_and_add_content (content1);
+	shared_ptr<StringTextFileContent> content2 (new StringTextFileContent (film, "test/data/subrip2.srt"));
+	film->examine_and_add_content (content2);
+	shared_ptr<StringTextFileContent> content3 (new StringTextFileContent (film, "test/data/subrip3.srt"));
+	film->examine_and_add_content (content3);
+	BOOST_REQUIRE (!wait_for_jobs ());
+
+	content1->only_text()->set_type (TEXT_CLOSED_CAPTION);
+	content1->only_text()->set_dcp_track (DCPTextTrack("First track", "French"));
+	content2->only_text()->set_type (TEXT_CLOSED_CAPTION);
+	content2->only_text()->set_dcp_track (DCPTextTrack("Second track", "German"));
+	content3->only_text()->set_type (TEXT_CLOSED_CAPTION);
+	content3->only_text()->set_dcp_track (DCPTextTrack("Third track", "Italian"));
+
+	film->make_dcp ();
+	BOOST_REQUIRE (!wait_for_jobs ());
+
+	dcp::DCP check (film->dir(film->dcp_name()));
+	check.read ();
+
+	BOOST_REQUIRE_EQUAL (check.cpls().size(), 1);
+	BOOST_REQUIRE_EQUAL (check.cpls().front()->reels().size(), 1);
+	/* Was `!...size()` which collapses the count to a bool and can never
+	 * equal 3; we want the actual number of closed-caption assets.
+	 */
+	BOOST_REQUIRE_EQUAL (check.cpls().front()->reels().front()->closed_captions().size(), 3);
+}