_name = examiner->name ();
for (int i = 0; i < TEXT_COUNT; ++i) {
for (int j = 0; j < examiner->text_count(static_cast<TextType>(i)); ++j) {
- text.push_back (shared_ptr<TextContent>(new TextContent(this, static_cast<TextType>(i), static_cast<TextType>(i))));
+ shared_ptr<TextContent> c(new TextContent(this, static_cast<TextType>(i), static_cast<TextType>(i)));
+ if (i == TEXT_CLOSED_CAPTION) {
+ c->set_dcp_track (examiner->dcp_text_track(j));
+ }
+ text.push_back (c);
}
}
texts = text.size ();
}
_text_count[TEXT_CLOSED_CAPTION]++;
+ _dcp_text_tracks.push_back (DCPTextTrack(j->annotation_text(), j->language().get_value_or(_("Unknown"))));
}
if (i->main_picture()) {
#include "video_examiner.h"
#include "audio_examiner.h"
#include "dcp.h"
+#include "dcp_text_track.h"
+#include "dcpomatic_assert.h"
+#include <dcp/dcp_time.h>
class DCPContent;
return _text_count[type];
}
+ /** @param i index of a closed-caption (CCAP) asset, in the order the examiner found them;
+  *  presumably parallel to the CCAP entries counted in _text_count — TODO confirm.
+  *  @return the DCPTextTrack recorded for that asset; asserts that i is within range.
+  */
+ DCPTextTrack dcp_text_track (int i) const {
+ DCPOMATIC_ASSERT (i >= 0 && i < static_cast<int>(_dcp_text_tracks.size()));
+ return _dcp_text_tracks[i];
+ }
+
/** @return value of _kdm_valid; presumably whether the KDM supplied for this
 *  encrypted DCP could be used — set during examination, not visible here (confirm).
 */
bool kdm_valid () const {
return _kdm_valid;
}
/* NOTE(review): flags below are set by the examiner's constructor/scan code, which
   is outside this view — descriptions are inferred from the names; confirm. */
bool _has_audio;
/** number of different assets of each type (OCAP/CCAP) */
int _text_count[TEXT_COUNT];
+ /** the DCPTextTracks for each of our CCAPs */
+ std::vector<DCPTextTrack> _dcp_text_tracks;
/* presumably: DCP has at least one encrypted asset */
bool _encrypted;
/* presumably: some referenced assets are missing from disk */
bool _needs_assets;
/* presumably: supplied KDM decrypts this DCP; see kdm_valid() */
bool _kdm_valid;
/* Keep about 1 second's worth of history samples */
_latency_history_count = _film->audio_frame_rate() / _audio_block_size;
+ _closed_captions_dialog->update_tracks (_film);
+
recreate_butler ();
calculate_sizes ();