Translating DVD-o-matic
-----------------------
-1. Run ./waf po
+1. Run ./waf pot
This will generate build/src/lib/dvdomatic.pot and build/src/wx/libdvdomatic-wx.pot.
import os
from waflib import Logs
-def pot(dir, sources, name):
+def pot(dir, sources, name, all = False):
s = ""
for f in sources.split('\n'):
t = f.strip()
if len(t) > 0:
s += (os.path.join(dir, t)) + " "
- Logs.info('Making %s.pot' % os.path.join('build', dir, name))
- os.system('xgettext -d %s -s --keyword=_ -p %s -o %s.pot %s' % (name, os.path.join('build', dir), name, s))
+ if all:
+ Logs.info('Making %s.pot (extracting all)' % os.path.join('build', dir, name))
+ os.system('xgettext -d %s -s --extract-all -p %s -o %s.pot %s' % (name, os.path.join('build', dir), name, s))
+ else:
+ Logs.info('Making %s.pot' % os.path.join('build', dir, name))
+ os.system('xgettext -d %s -s --keyword=_ --add-comments=/ -p %s -o %s.pot %s' % (name, os.path.join('build', dir), name, s))
def po_to_mo(dir, name):
elif [ "$1" == "--valgrind" ]; then
shift
valgrind --tool="memcheck" build/src/tools/dvdomatic $*
+elif [ "$1" == "--i18n" ]; then
+ shift
+ LANG=fr_FR.UTF8 build/src/tools/dvdomatic "$*"
else
build/src/tools/dvdomatic "$*"
fi
#include "config.h"
#include "encoder.h"
+#include "i18n.h"
+
using std::string;
using boost::shared_ptr;
string
ABTranscodeJob::name () const
{
- return String::compose ("A/B transcode %1", _film->name());
+ return String::compose (_("A/B transcode %1"), _film->name());
}
void
#include "filter.h"
#include "sound_processor.h"
+#include "i18n.h"
+
using std::vector;
using std::ifstream;
using std::string;
Config::Config ()
: _num_local_encoding_threads (2)
, _server_port (6192)
- , _reference_scaler (Scaler::from_id ("bicubic"))
- , _tms_path (".")
- , _sound_processor (SoundProcessor::from_id ("dolby_cp750"))
+ , _reference_scaler (Scaler::from_id (N_("bicubic")))
+ , _tms_path (N_("."))
+ , _sound_processor (SoundProcessor::from_id (N_("dolby_cp750")))
{
_allowed_dcp_frame_rates.push_back (24);
_allowed_dcp_frame_rates.push_back (25);
string const k = line.substr (0, s);
string const v = line.substr (s + 1);
- if (k == "num_local_encoding_threads") {
+ if (k == N_("num_local_encoding_threads")) {
_num_local_encoding_threads = atoi (v.c_str ());
- } else if (k == "default_directory") {
+ } else if (k == N_("default_directory")) {
_default_directory = v;
- } else if (k == "server_port") {
+ } else if (k == N_("server_port")) {
_server_port = atoi (v.c_str ());
- } else if (k == "reference_scaler") {
+ } else if (k == N_("reference_scaler")) {
_reference_scaler = Scaler::from_id (v);
- } else if (k == "reference_filter") {
+ } else if (k == N_("reference_filter")) {
_reference_filters.push_back (Filter::from_id (v));
- } else if (k == "server") {
+ } else if (k == N_("server")) {
_servers.push_back (ServerDescription::create_from_metadata (v));
- } else if (k == "tms_ip") {
+ } else if (k == N_("tms_ip")) {
_tms_ip = v;
- } else if (k == "tms_path") {
+ } else if (k == N_("tms_path")) {
_tms_path = v;
- } else if (k == "tms_user") {
+ } else if (k == N_("tms_user")) {
_tms_user = v;
- } else if (k == "tms_password") {
+ } else if (k == N_("tms_password")) {
_tms_password = v;
- } else if (k == "sound_processor") {
+ } else if (k == N_("sound_processor")) {
_sound_processor = SoundProcessor::from_id (v);
}
{
boost::filesystem::path p;
p /= g_get_user_config_dir ();
- p /= ".dvdomatic";
+ p /= N_(".dvdomatic");
return p.string ();
}
Config::write () const
{
ofstream f (file().c_str ());
- f << "num_local_encoding_threads " << _num_local_encoding_threads << "\n"
- << "default_directory " << _default_directory << "\n"
- << "server_port " << _server_port << "\n"
- << "reference_scaler " << _reference_scaler->id () << "\n";
+ f << N_("num_local_encoding_threads ") << _num_local_encoding_threads << N_("\n")
+ << N_("default_directory ") << _default_directory << N_("\n")
+ << N_("server_port ") << _server_port << N_("\n")
+ << N_("reference_scaler ") << _reference_scaler->id () << N_("\n");
for (vector<Filter const *>::const_iterator i = _reference_filters.begin(); i != _reference_filters.end(); ++i) {
- f << "reference_filter " << (*i)->id () << "\n";
+ f << N_("reference_filter ") << (*i)->id () << N_("\n");
}
for (vector<ServerDescription*>::const_iterator i = _servers.begin(); i != _servers.end(); ++i) {
- f << "server " << (*i)->as_metadata () << "\n";
+ f << N_("server ") << (*i)->as_metadata () << N_("\n");
}
- f << "tms_ip " << _tms_ip << "\n";
- f << "tms_path " << _tms_path << "\n";
- f << "tms_user " << _tms_user << "\n";
- f << "tms_password " << _tms_password << "\n";
- f << "sound_processor " << _sound_processor->id () << "\n";
+ f << N_("tms_ip ") << _tms_ip << N_("\n");
+ f << N_("tms_path ") << _tms_path << N_("\n");
+ f << N_("tms_user ") << _tms_user << N_("\n");
+ f << N_("tms_password ") << _tms_password << N_("\n");
+ f << N_("sound_processor ") << _sound_processor->id () << N_("\n");
_default_dci_metadata.write (f);
}
#include <iostream>
#include "dci_metadata.h"
+#include "i18n.h"
+
using namespace std;
void
DCIMetadata::write (ostream& f) const
{
- f << "audio_language " << audio_language << "\n";
- f << "subtitle_language " << subtitle_language << "\n";
- f << "territory " << territory << "\n";
- f << "rating " << rating << "\n";
- f << "studio " << studio << "\n";
- f << "facility " << facility << "\n";
- f << "package_type " << package_type << "\n";
+ f << N_("audio_language ") << audio_language << N_("\n");
+ f << N_("subtitle_language ") << subtitle_language << N_("\n");
+ f << N_("territory ") << territory << N_("\n");
+ f << N_("rating ") << rating << N_("\n");
+ f << N_("studio ") << studio << N_("\n");
+ f << N_("facility ") << facility << N_("\n");
+ f << N_("package_type ") << package_type << N_("\n");
}
void
DCIMetadata::read (string k, string v)
{
- if (k == "audio_language") {
+ if (k == N_("audio_language")) {
audio_language = v;
- } else if (k == "subtitle_language") {
+ } else if (k == N_("subtitle_language")) {
subtitle_language = v;
- } else if (k == "territory") {
+ } else if (k == N_("territory")) {
territory = v;
- } else if (k == "rating") {
+ } else if (k == N_("rating")) {
rating = v;
- } else if (k == "studio") {
+ } else if (k == N_("studio")) {
studio = v;
- } else if (k == "facility") {
+ } else if (k == N_("facility")) {
facility = v;
- } else if (k == "package_type") {
+ } else if (k == N_("package_type")) {
package_type = v;
}
}
#include <cassert>
#include "dcp_content_type.h"
+#include "i18n.h"
+
using namespace std;
vector<DCPContentType const *> DCPContentType::_dcp_content_types;
void
DCPContentType::setup_dcp_content_types ()
{
- _dcp_content_types.push_back (new DCPContentType ("Feature", libdcp::FEATURE, "FTR"));
- _dcp_content_types.push_back (new DCPContentType ("Short", libdcp::SHORT, "SHR"));
- _dcp_content_types.push_back (new DCPContentType ("Trailer", libdcp::TRAILER, "TLR"));
- _dcp_content_types.push_back (new DCPContentType ("Test", libdcp::TEST, "TST"));
- _dcp_content_types.push_back (new DCPContentType ("Transitional", libdcp::TRANSITIONAL, "XSN"));
- _dcp_content_types.push_back (new DCPContentType ("Rating", libdcp::RATING, "RTG"));
- _dcp_content_types.push_back (new DCPContentType ("Teaser", libdcp::TEASER, "TSR"));
- _dcp_content_types.push_back (new DCPContentType ("Policy", libdcp::POLICY, "POL"));
- _dcp_content_types.push_back (new DCPContentType ("Public Service Announcement", libdcp::PUBLIC_SERVICE_ANNOUNCEMENT, "PSA"));
- _dcp_content_types.push_back (new DCPContentType ("Advertisement", libdcp::ADVERTISEMENT, "ADV"));
+ _dcp_content_types.push_back (new DCPContentType (_("Feature"), libdcp::FEATURE, N_("FTR")));
+ _dcp_content_types.push_back (new DCPContentType (_("Short"), libdcp::SHORT, N_("SHR")));
+ _dcp_content_types.push_back (new DCPContentType (_("Trailer"), libdcp::TRAILER, N_("TLR")));
+ _dcp_content_types.push_back (new DCPContentType (_("Test"), libdcp::TEST, N_("TST")));
+ _dcp_content_types.push_back (new DCPContentType (_("Transitional"), libdcp::TRANSITIONAL, N_("XSN")));
+ _dcp_content_types.push_back (new DCPContentType (_("Rating"), libdcp::RATING, N_("RTG")));
+ _dcp_content_types.push_back (new DCPContentType (_("Teaser"), libdcp::TEASER, N_("TSR")));
+ _dcp_content_types.push_back (new DCPContentType (_("Policy"), libdcp::POLICY, N_("POL")));
+ _dcp_content_types.push_back (new DCPContentType (_("Public Service Announcement"), libdcp::PUBLIC_SERVICE_ANNOUNCEMENT, N_("PSA")));
+ _dcp_content_types.push_back (new DCPContentType (_("Advertisement"), libdcp::ADVERTISEMENT, N_("ADV")));
}
DCPContentType const *
#include "log.h"
#include "subtitle.h"
+#include "i18n.h"
+
using std::string;
using std::stringstream;
using std::ofstream;
_image = opj_image_create (3, &_cmptparm[0], CLRSPC_SRGB);
if (_image == 0) {
- throw EncodeError ("could not create libopenjpeg image");
+ throw EncodeError (N_("could not create libopenjpeg image"));
}
_image->x0 = 0;
_parameters->tcp_numlayers++;
_parameters->cp_disto_alloc = 1;
_parameters->cp_rsiz = CINEMA2K;
- _parameters->cp_comment = strdup ("DVD-o-matic");
+ _parameters->cp_comment = strdup (N_("DVD-o-matic"));
_parameters->cp_cinema = CINEMA2K_24;
/* 3 components, so use MCT */
/* get a J2K compressor handle */
_cinfo = opj_create_compress (CODEC_J2K);
if (_cinfo == 0) {
- throw EncodeError ("could not create JPEG2000 encoder");
+ throw EncodeError (N_("could not create JPEG2000 encoder"));
}
/* Set event manager to null (openjpeg 1.3 bug) */
_cio = opj_cio_open ((opj_common_ptr) _cinfo, 0, 0);
if (_cio == 0) {
- throw EncodeError ("could not open JPEG2000 stream");
+ throw EncodeError (N_("could not open JPEG2000 stream"));
}
int const r = opj_encode (_cinfo, _cio, _image, 0);
if (r == 0) {
- throw EncodeError ("JPEG2000 encoding failed");
+ throw EncodeError (N_("JPEG2000 encoding failed"));
}
- _log->log (String::compose ("Finished locally-encoded frame %1", _frame));
+ _log->log (String::compose (N_("Finished locally-encoded frame %1"), _frame));
return shared_ptr<EncodedData> (new LocallyEncodedData (_cio->buffer, cio_tell (_cio)));
}
socket->connect (*endpoint_iterator);
stringstream s;
- s << "encode please\n"
- << "input_width " << _input->size().width << "\n"
- << "input_height " << _input->size().height << "\n"
- << "input_pixel_format " << _input->pixel_format() << "\n"
- << "output_width " << _out_size.width << "\n"
- << "output_height " << _out_size.height << "\n"
- << "padding " << _padding << "\n"
- << "subtitle_offset " << _subtitle_offset << "\n"
- << "subtitle_scale " << _subtitle_scale << "\n"
- << "scaler " << _scaler->id () << "\n"
- << "frame " << _frame << "\n"
- << "frames_per_second " << _frames_per_second << "\n";
+ s << N_("encode please\n")
+ << N_("input_width ") << _input->size().width << N_("\n")
+ << N_("input_height ") << _input->size().height << N_("\n")
+ << N_("input_pixel_format ") << _input->pixel_format() << N_("\n")
+ << N_("output_width ") << _out_size.width << N_("\n")
+ << N_("output_height ") << _out_size.height << N_("\n")
+ << N_("padding ") << _padding << N_("\n")
+ << N_("subtitle_offset ") << _subtitle_offset << N_("\n")
+ << N_("subtitle_scale ") << _subtitle_scale << N_("\n")
+ << N_("scaler ") << _scaler->id () << N_("\n")
+ << N_("frame ") << _frame << N_("\n")
+ << N_("frames_per_second ") << _frames_per_second << N_("\n");
if (!_post_process.empty()) {
- s << "post_process " << _post_process << "\n";
+ s << N_("post_process ") << _post_process << N_("\n");
}
- s << "colour_lut " << _colour_lut << "\n"
- << "j2k_bandwidth " << _j2k_bandwidth << "\n";
+ s << N_("colour_lut ") << _colour_lut << N_("\n")
+ << N_("j2k_bandwidth ") << _j2k_bandwidth << N_("\n");
if (_subtitle) {
- s << "subtitle_x " << _subtitle->position().x << "\n"
- << "subtitle_y " << _subtitle->position().y << "\n"
- << "subtitle_width " << _subtitle->image()->size().width << "\n"
- << "subtitle_height " << _subtitle->image()->size().height << "\n";
+ s << N_("subtitle_x ") << _subtitle->position().x << N_("\n")
+ << N_("subtitle_y ") << _subtitle->position().y << N_("\n")
+ << N_("subtitle_width ") << _subtitle->image()->size().width << N_("\n")
+ << N_("subtitle_height ") << _subtitle->image()->size().height << N_("\n");
}
_log->log (String::compose (
- "Sending to remote; pixel format %1, components %2, lines (%3,%4,%5), line sizes (%6,%7,%8)",
+ N_("Sending to remote; pixel format %1, components %2, lines (%3,%4,%5), line sizes (%6,%7,%8)"),
_input->pixel_format(), _input->components(),
_input->lines(0), _input->lines(1), _input->lines(2),
_input->line_size()[0], _input->line_size()[1], _input->line_size()[2]
shared_ptr<EncodedData> e (new RemotelyEncodedData (socket->read_uint32 ()));
socket->read (e->data(), e->size());
- _log->log (String::compose ("Finished remotely-encoded frame %1", _frame));
+ _log->log (String::compose (N_("Finished remotely-encoded frame %1"), _frame));
return e;
}
_size = boost::filesystem::file_size (file);
_data = new uint8_t[_size];
- FILE* f = fopen (file.c_str(), "rb");
+ FILE* f = fopen (file.c_str(), N_("rb"));
if (!f) {
- throw FileError ("could not open file for reading", file);
+ throw FileError (_("could not open file for reading"), file);
}
fread (_data, 1, _size, f);
{
string const tmp_j2c = film->j2c_path (frame, true);
- FILE* f = fopen (tmp_j2c.c_str (), "wb");
+ FILE* f = fopen (tmp_j2c.c_str (), N_("wb"));
if (!f) {
throw WriteFileError (tmp_j2c, errno);
#include "subtitle.h"
#include "filter_graph.h"
+#include "i18n.h"
+
using std::string;
using std::stringstream;
using std::min;
bool
Decoder::seek (double)
{
- throw DecodeError ("decoder does not support seek");
+ throw DecodeError (N_("decoder does not support seek"));
}
/** Seek so that the next frame we will produce is the same as the last one.
bool
Decoder::seek_to_last ()
{
- throw DecodeError ("decoder does not support seek");
+ throw DecodeError (N_("decoder does not support seek"));
}
#include "dolby_cp750.h"
+#include "i18n.h"
+
using namespace std;
DolbyCP750::DolbyCP750 ()
- : SoundProcessor ("dolby_cp750", "Dolby CP750")
+ : SoundProcessor ("dolby_cp750", _("Dolby CP750"))
{
}
#include "cross.h"
#include "writer.h"
+#include "i18n.h"
+
using std::pair;
using std::string;
using std::stringstream;
#ifdef HAVE_SWRESAMPLE
stringstream s;
- s << "Will resample audio from " << _film->audio_stream()->sample_rate() << " to " << _film->target_audio_sample_rate();
+ s << String::compose (N_("Will resample audio from %1 to %2"), _film->audio_stream()->sample_rate(), _film->target_audio_sample_rate());
_film->log()->log (s.str ());
/* We will be using planar float data when we call the resampler */
swr_init (_swr_context);
#else
- throw EncodeError ("Cannot resample audio as libswresample is not present");
+ throw EncodeError (_("Cannot resample audio as libswresample is not present"));
#endif
} else {
#ifdef HAVE_SWRESAMPLE
int const frames = swr_convert (_swr_context, (uint8_t **) out->data(), 256, 0, 0);
if (frames < 0) {
- throw EncodeError ("could not run sample-rate converter");
+ throw EncodeError (_("could not run sample-rate converter"));
}
if (frames == 0) {
boost::mutex::scoped_lock lock (_mutex);
- _film->log()->log ("Clearing queue of " + lexical_cast<string> (_queue.size ()));
+ _film->log()->log (String::compose (N_("Clearing queue of %1"), _queue.size ()));
/* Keep waking workers until the queue is empty */
while (!_queue.empty ()) {
- _film->log()->log ("Waking with " + lexical_cast<string> (_queue.size ()), Log::VERBOSE);
+ _film->log()->log (String::compose (N_("Waking with %1"), _queue.size ()), Log::VERBOSE);
_condition.notify_all ();
_condition.wait (lock);
}
terminate_threads ();
- _film->log()->log ("Mopping up " + lexical_cast<string> (_queue.size()));
+ _film->log()->log (String::compose (N_("Mopping up %1"), _queue.size()));
/* The following sequence of events can occur in the above code:
1. a remote worker takes the last image off the queue
*/
for (list<shared_ptr<DCPVideoFrame> >::iterator i = _queue.begin(); i != _queue.end(); ++i) {
- _film->log()->log (String::compose ("Encode left-over frame %1", (*i)->frame ()));
+ _film->log()->log (String::compose (N_("Encode left-over frame %1"), (*i)->frame ()));
try {
_writer->write ((*i)->encode_locally(), (*i)->frame ());
frame_done ();
} catch (std::exception& e) {
- _film->log()->log (String::compose ("Local encode failed (%1)", e.what ()));
+ _film->log()->log (String::compose (N_("Local encode failed (%1)"), e.what ()));
}
}
/* Wait until the queue has gone down a bit */
while (_queue.size() >= _threads.size() * 2 && !_terminate) {
- TIMING ("decoder sleeps with queue of %1", _queue.size());
+ TIMING (_("decoder sleeps with queue of %1"), _queue.size());
_condition.wait (lock);
- TIMING ("decoder wakes with queue of %1", _queue.size());
+ TIMING (_("decoder wakes with queue of %1"), _queue.size());
}
if (_terminate) {
} else {
/* Queue this new frame for encoding */
pair<string, string> const s = Filter::ffmpeg_strings (_film->filters());
- TIMING ("adding to queue of %1", _queue.size ());
+ TIMING (_("adding to queue of %1"), _queue.size ());
_queue.push_back (boost::shared_ptr<DCPVideoFrame> (
new DCPVideoFrame (
image, sub, _film->format()->dcp_size(), _film->format()->dcp_padding (_film),
);
if (resampled_frames < 0) {
- throw EncodeError ("could not run sample-rate converter");
+ throw EncodeError (_("could not run sample-rate converter"));
}
resampled->set_frames (resampled_frames);
while (1) {
- TIMING ("encoder thread %1 sleeps", boost::this_thread::get_id());
+ TIMING (N_("encoder thread %1 sleeps"), boost::this_thread::get_id());
boost::mutex::scoped_lock lock (_mutex);
while (_queue.empty () && !_terminate) {
_condition.wait (lock);
return;
}
- TIMING ("encoder thread %1 wakes with queue of %2", boost::this_thread::get_id(), _queue.size());
+ TIMING (N_("encoder thread %1 wakes with queue of %2"), boost::this_thread::get_id(), _queue.size());
boost::shared_ptr<DCPVideoFrame> vf = _queue.front ();
- _film->log()->log (String::compose ("Encoder thread %1 pops frame %2 from queue", boost::this_thread::get_id(), vf->frame()), Log::VERBOSE);
+ _film->log()->log (String::compose (N_("Encoder thread %1 pops frame %2 from queue"), boost::this_thread::get_id(), vf->frame()), Log::VERBOSE);
_queue.pop_front ();
lock.unlock ();
encoded = vf->encode_remotely (server);
if (remote_backoff > 0) {
- _film->log()->log (String::compose ("%1 was lost, but now she is found; removing backoff", server->host_name ()));
+ _film->log()->log (String::compose (N_("%1 was lost, but now she is found; removing backoff"), server->host_name ()));
}
/* This job succeeded, so remove any backoff */
}
_film->log()->log (
String::compose (
- "Remote encode of %1 on %2 failed (%3); thread sleeping for %4s",
+ N_("Remote encode of %1 on %2 failed (%3); thread sleeping for %4s"),
vf->frame(), server->host_name(), e.what(), remote_backoff)
);
}
} else {
try {
- TIMING ("encoder thread %1 begins local encode of %2", boost::this_thread::get_id(), vf->frame());
+ TIMING (N_("encoder thread %1 begins local encode of %2"), boost::this_thread::get_id(), vf->frame());
encoded = vf->encode_locally ();
- TIMING ("encoder thread %1 finishes local encode of %2", boost::this_thread::get_id(), vf->frame());
+ TIMING (N_("encoder thread %1 finishes local encode of %2"), boost::this_thread::get_id(), vf->frame());
} catch (std::exception& e) {
- _film->log()->log (String::compose ("Local encode failed (%1)", e.what ()));
+ _film->log()->log (String::compose (N_("Local encode failed (%1)"), e.what ()));
}
}
} else {
lock.lock ();
_film->log()->log (
- String::compose ("Encoder thread %1 pushes frame %2 back onto queue after failure", boost::this_thread::get_id(), vf->frame())
+ String::compose (N_("Encoder thread %1 pushes frame %2 back onto queue after failure"), boost::this_thread::get_id(), vf->frame())
);
_queue.push_front (vf);
lock.unlock ();
#include "film.h"
#include "video_decoder.h"
+#include "i18n.h"
+
using std::string;
using std::vector;
using std::pair;
ExamineContentJob::name () const
{
if (_film->name().empty ()) {
- return "Examine content";
+ return _("Examine content");
}
- return String::compose ("Examine content of %1", _film->name());
+ return String::compose (_("Examine content of %1"), _film->name());
}
void
_film->set_length (decoders.video->video_frame());
- _film->log()->log (String::compose ("Video length examined as %1 frames", _film->length().get()));
+ _film->log()->log (String::compose (N_("Video length examined as %1 frames"), _film->length().get()));
} else {
Decoders d = decoder_factory (_film, DecodeOptions());
_film->set_length (d.video->length());
- _film->log()->log (String::compose ("Video length obtained from header as %1 frames", _film->length().get()));
+ _film->log()->log (String::compose (N_("Video length obtained from header as %1 frames"), _film->length().get()));
}
ascend ();
#include "film.h"
#include "exceptions.h"
+#include "i18n.h"
+
using std::vector;
using std::string;
using std::stringstream;
SF_INFO info;
SNDFILE* s = sf_open (files[i].c_str(), SFM_READ, &info);
if (!s) {
- throw DecodeError ("could not open external audio file for reading");
+ throw DecodeError (_("could not open external audio file for reading"));
}
if (info.channels != 1) {
- throw DecodeError ("external audio files must be mono");
+ throw DecodeError (_("external audio files must be mono"));
}
sndfiles.push_back (s);
first = false;
} else {
if (info.frames != frames) {
- throw DecodeError ("external audio files have differing lengths");
+ throw DecodeError (_("external audio files have differing lengths"));
}
}
}
stringstream s (t);
string type;
s >> type;
- if (type != "external") {
+ if (type != N_("external")) {
return shared_ptr<ExternalAudioStream> ();
}
string
ExternalAudioStream::to_string () const
{
- return String::compose ("external %1 %2", _sample_rate, _channel_layout);
+ return String::compose (N_("external %1 %2"), _sample_rate, _channel_layout);
}
}
#include "exceptions.h"
+#include "i18n.h"
+
#if LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR == 15
typedef struct {
#if LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR == 15
/* XXX does this leak stuff? */
AVFilter* buffer_sink = new AVFilter;
- buffer_sink->name = av_strdup ("avsink");
+ buffer_sink->name = av_strdup (N_("avsink"));
buffer_sink->priv_size = sizeof (AVSinkContext);
buffer_sink->init = avsink_init;
buffer_sink->query_formats = avsink_query_formats;
buffer_sink->inputs = new AVFilterPad[2];
AVFilterPad* i0 = const_cast<AVFilterPad*> (&buffer_sink->inputs[0]);
- i0->name = "default";
+ i0->name = N_("default");
i0->type = AVMEDIA_TYPE_VIDEO;
i0->min_perms = AV_PERM_READ;
i0->rej_perms = 0;
const_cast<AVFilterPad*> (&buffer_sink->outputs[0])->name = 0;
return buffer_sink;
#else
- AVFilter* buffer_sink = avfilter_get_by_name("buffersink");
+ AVFilter* buffer_sink = avfilter_get_by_name(N_("buffersink"));
if (buffer_sink == 0) {
- throw DecodeError ("Could not create buffer sink filter");
+ throw DecodeError (N_("Could not create buffer sink filter"));
}
return buffer_sink;
#include "filter_graph.h"
#include "subtitle.h"
+#include "i18n.h"
+
using std::cout;
using std::string;
using std::vector;
}
if (avformat_find_stream_info (_format_context, 0) < 0) {
- throw DecodeError ("could not find stream information");
+ throw DecodeError (_("could not find stream information"));
}
/* Find video, audio and subtitle streams and choose the first of each */
}
if (_video_stream < 0) {
- throw DecodeError ("could not find video stream");
+ throw DecodeError (N_("could not find video stream"));
}
_frame = avcodec_alloc_frame ();
if (_frame == 0) {
- throw DecodeError ("could not allocate frame");
+ throw DecodeError (N_("could not allocate frame"));
}
}
_video_codec = avcodec_find_decoder (_video_codec_context->codec_id);
if (_video_codec == 0) {
- throw DecodeError ("could not find video decoder");
+ throw DecodeError (_("could not find video decoder"));
}
if (avcodec_open2 (_video_codec_context, _video_codec, 0) < 0) {
- throw DecodeError ("could not open video decoder");
+ throw DecodeError (N_("could not open video decoder"));
}
}
_audio_codec = avcodec_find_decoder (_audio_codec_context->codec_id);
if (_audio_codec == 0) {
- throw DecodeError ("could not find audio decoder");
+ throw DecodeError (_("could not find audio decoder"));
}
if (avcodec_open2 (_audio_codec_context, _audio_codec, 0) < 0) {
- throw DecodeError ("could not open audio decoder");
+ throw DecodeError (N_("could not open audio decoder"));
}
}
_subtitle_codec = avcodec_find_decoder (_subtitle_codec_context->codec_id);
if (_subtitle_codec == 0) {
- throw DecodeError ("could not find subtitle decoder");
+ throw DecodeError (_("could not find subtitle decoder"));
}
if (avcodec_open2 (_subtitle_codec_context, _subtitle_codec, 0) < 0) {
- throw DecodeError ("could not open subtitle decoder");
+ throw DecodeError (N_("could not open subtitle decoder"));
}
}
/* Maybe we should fail here, but for now we'll just finish off instead */
char buf[256];
av_strerror (r, buf, sizeof(buf));
- _film->log()->log (String::compose ("error on av_read_frame (%1) (%2)", buf, r));
+ _film->log()->log (String::compose (N_("error on av_read_frame (%1) (%2)"), buf, r));
}
/* Get any remaining frames */
if (r >= 0 && frame_finished) {
if (r != _packet.size) {
- _film->log()->log (String::compose ("Used only %1 bytes of %2 in packet", r, _packet.size));
+ _film->log()->log (String::compose (N_("Used only %1 bytes of %2 in packet"), r, _packet.size));
}
if (_opt.video_sync) {
_film->log()->log (
String::compose (
- "First video at %1, first audio at %2, pushing %3 audio frames of silence for %4 channels (%5 bytes per sample)",
+ N_("First video at %1, first audio at %2, pushing %3 audio frames of silence for %4 channels (%5 bytes per sample)"),
_first_video.get(), _first_audio.get(), s, ffa->channels(), bytes_per_audio_sample()
)
);
break;
default:
- throw DecodeError (String::compose ("Unrecognised audio sample format (%1)", static_cast<int> (audio_sample_format())));
+ throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format())));
}
return audio;
{
stringstream n;
- AVDictionaryEntry const * lang = av_dict_get (s->metadata, "language", 0, 0);
+ AVDictionaryEntry const * lang = av_dict_get (s->metadata, N_("language"), 0, 0);
if (lang) {
n << lang->value;
}
- AVDictionaryEntry const * title = av_dict_get (s->metadata, "title", 0, 0);
+ AVDictionaryEntry const * title = av_dict_get (s->metadata, N_("title"), 0, 0);
if (title) {
if (!n.str().empty()) {
- n << " ";
+ n << N_(" ");
}
n << title->value;
}
if (n.str().empty()) {
- n << "unknown";
+ n << N_("unknown");
}
return n.str ();
if (i == _filter_graphs.end ()) {
graph.reset (new FilterGraph (_film, this, libdcp::Size (frame->width, frame->height), (AVPixelFormat) frame->format));
_filter_graphs.push_back (graph);
- _film->log()->log (String::compose ("New graph for %1x%2, pixel format %3", frame->width, frame->height, frame->format));
+ _film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), frame->width, frame->height, frame->format));
} else {
graph = *i;
}
stringstream s (t);
string type;
s >> type;
- if (type != "ffmpeg") {
+ if (type != N_("ffmpeg")) {
return shared_ptr<FFmpegAudioStream> ();
}
string type;
/* Current (marked version 1) */
n >> type >> _id >> _sample_rate >> _channel_layout;
- assert (type == "ffmpeg");
+ assert (type == N_("ffmpeg"));
}
for (int i = 0; i < name_index; ++i) {
string
FFmpegAudioStream::to_string () const
{
- return String::compose ("ffmpeg %1 %2 %3 %4", _id, _sample_rate, _channel_layout, _name);
+ return String::compose (N_("ffmpeg %1 %2 %3 %4"), _id, _sample_rate, _channel_layout, _name);
}
void
* av_frame_get_best_effort_timestamp(_frame);
_film->log()->log (
- String::compose ("Source video frame ready; source at %1, output at %2", source_pts_seconds, out_pts_seconds),
+ String::compose (N_("Source video frame ready; source at %1, output at %2"), source_pts_seconds, out_pts_seconds),
Log::VERBOSE
);
repeat_last_video ();
_film->log()->log (
String::compose (
- "Extra video frame inserted at %1s; source frame %2, source PTS %3 (at %4 fps)",
+ N_("Extra video frame inserted at %1s; source frame %2, source PTS %3 (at %4 fps)"),
out_pts_seconds, video_frame(), source_pts_seconds, frames_per_second()
)
);
filter_and_emit_video (_frame);
} else {
/* Otherwise we are omitting a frame to keep things right */
- _film->log()->log (String::compose ("Frame removed at %1s", out_pts_seconds));
+ _film->log()->log (String::compose (N_("Frame removed at %1s"), out_pts_seconds));
}
}
, _trust_content_header (true)
, _dcp_content_type (0)
, _format (0)
- , _scaler (Scaler::from_id ("bicubic"))
+ , _scaler (Scaler::from_id (N_("bicubic")))
, _trim_start (0)
, _trim_end (0)
, _dcp_ab (false)
boost::filesystem::path p (boost::filesystem::system_complete (d));
boost::filesystem::path result;
for (boost::filesystem::path::iterator i = p.begin(); i != p.end(); ++i) {
- if (*i == "..") {
- if (boost::filesystem::is_symlink (result) || result.filename() == "..") {
+ if (*i == N_("..")) {
+ if (boost::filesystem::is_symlink (result) || result.filename() == N_("..")) {
result /= *i;
} else {
result = result.parent_path ();
}
- } else if (*i != ".") {
+ } else if (*i != N_(".")) {
result /= *i;
}
}
read_metadata ();
}
- _log = new FileLog (file ("log"));
+ _log = new FileLog (file (N_("log")));
}
Film::Film (Film const & o)
stringstream s;
s << format()->id()
- << "_" << content_digest()
- << "_" << crop().left << "_" << crop().right << "_" << crop().top << "_" << crop().bottom
- << "_" << f.first << "_" << f.second
- << "_" << scaler()->id()
- << "_" << j2k_bandwidth()
- << "_" << boost::lexical_cast<int> (colour_lut());
+ << N_("_") << content_digest()
+ << N_("_") << crop().left << N_("_") << crop().right << N_("_") << crop().top << N_("_") << crop().bottom
+ << N_("_") << f.first << N_("_") << f.second
+ << N_("_") << scaler()->id()
+ << N_("_") << j2k_bandwidth()
+ << N_("_") << boost::lexical_cast<int> (colour_lut());
if (dcp_ab()) {
pair<string, string> fa = Filter::ffmpeg_strings (Config::instance()->reference_filters());
- s << "ab_" << Config::instance()->reference_scaler()->id() << "_" << fa.first << "_" << fa.second;
+ s << N_("ab_") << Config::instance()->reference_scaler()->id() << N_("_") << fa.first << N_("_") << fa.second;
}
return s.str ();
Film::info_dir () const
{
boost::filesystem::path p;
- p /= "info";
+ p /= N_("info");
p /= video_state_identifier ();
return dir (p.string());
}
Film::video_mxf_dir () const
{
boost::filesystem::path p;
- return dir ("video");
+ return dir (N_("video"));
}
string
Film::video_mxf_filename () const
{
- return video_state_identifier() + ".mxf";
+ return video_state_identifier() + N_(".mxf");
}
/** Add suitable Jobs to the JobManager to create a DCP for this Film */
{
set_dci_date_today ();
- if (dcp_name().find ("/") != string::npos) {
- throw BadSettingError ("name", _("cannot contain slashes"));
+ if (dcp_name().find (N_("/")) != string::npos) {
+ throw BadSettingError (_("name"), _("cannot contain slashes"));
}
- log()->log (String::compose ("DVD-o-matic %1 git %2 using %3", dvdomatic_version, dvdomatic_git_commit, dependency_version_summary()));
+ log()->log (String::compose (N_("DVD-o-matic %1 git %2 using %3"), dvdomatic_version, dvdomatic_git_commit, dependency_version_summary()));
{
char buffer[128];
gethostname (buffer, sizeof (buffer));
- log()->log (String::compose ("Starting to make DCP on %1", buffer));
+ log()->log (String::compose (N_("Starting to make DCP on %1"), buffer));
}
- log()->log (String::compose ("Content is %1; type %2", content_path(), (content_type() == STILL ? "still" : "video")));
+ log()->log (String::compose (N_("Content is %1; type %2"), content_path(), (content_type() == STILL ? _("still") : _("video"))));
if (length()) {
- log()->log (String::compose ("Content length %1", length().get()));
+ log()->log (String::compose (N_("Content length %1"), length().get()));
}
- log()->log (String::compose ("Content digest %1", content_digest()));
- log()->log (String::compose ("%1 threads", Config::instance()->num_local_encoding_threads()));
- log()->log (String::compose ("J2K bandwidth %1", j2k_bandwidth()));
+ log()->log (String::compose (N_("Content digest %1"), content_digest()));
+ log()->log (String::compose (N_("%1 threads"), Config::instance()->num_local_encoding_threads()));
+ log()->log (String::compose (N_("J2K bandwidth %1"), j2k_bandwidth()));
#ifdef DVDOMATIC_DEBUG
- log()->log ("DVD-o-matic built in debug mode.");
+ log()->log (N_("DVD-o-matic built in debug mode."));
#else
- log()->log ("DVD-o-matic built in optimised mode.");
+ log()->log (N_("DVD-o-matic built in optimised mode."));
#endif
#ifdef LIBDCP_DEBUG
- log()->log ("libdcp built in debug mode.");
+ log()->log (N_("libdcp built in debug mode."));
#else
- log()->log ("libdcp built in optimised mode.");
+ log()->log (N_("libdcp built in optimised mode."));
#endif
pair<string, int> const c = cpu_info ();
- log()->log (String::compose ("CPU: %1, %2 processors", c.first, c.second));
+ log()->log (String::compose (N_("CPU: %1, %2 processors"), c.first, c.second));
if (format() == 0) {
- throw MissingSettingError ("format");
+ throw MissingSettingError (_("format"));
}
if (content().empty ()) {
- throw MissingSettingError ("content");
+ throw MissingSettingError (_("content"));
}
if (dcp_content_type() == 0) {
- throw MissingSettingError ("content type");
+ throw MissingSettingError (_("content type"));
}
if (name().empty()) {
- throw MissingSettingError ("name");
+ throw MissingSettingError (_("name"));
}
DecodeOptions od;
boost::filesystem::create_directories (directory());
- string const m = file ("metadata");
+ string const m = file (N_("metadata"));
ofstream f (m.c_str ());
if (!f.good ()) {
throw CreateFileError (m);
}
- f << "version " << state_version << "\n";
+ f << N_("version ") << state_version << N_("\n");
/* User stuff */
- f << "name " << _name << "\n";
- f << "use_dci_name " << _use_dci_name << "\n";
- f << "content " << _content << "\n";
- f << "trust_content_header " << (_trust_content_header ? "1" : "0") << "\n";
+ f << N_("name ") << _name << N_("\n");
+ f << N_("use_dci_name ") << _use_dci_name << N_("\n");
+ f << N_("content ") << _content << N_("\n");
+ f << N_("trust_content_header ") << (_trust_content_header ? N_("1") : N_("0")) << N_("\n");
if (_dcp_content_type) {
- f << "dcp_content_type " << _dcp_content_type->dci_name () << "\n";
+ f << N_("dcp_content_type ") << _dcp_content_type->dci_name () << N_("\n");
}
if (_format) {
- f << "format " << _format->as_metadata () << "\n";
+ f << N_("format ") << _format->as_metadata () << N_("\n");
}
- f << "left_crop " << _crop.left << "\n";
- f << "right_crop " << _crop.right << "\n";
- f << "top_crop " << _crop.top << "\n";
- f << "bottom_crop " << _crop.bottom << "\n";
+ f << N_("left_crop ") << _crop.left << N_("\n");
+ f << N_("right_crop ") << _crop.right << N_("\n");
+ f << N_("top_crop ") << _crop.top << N_("\n");
+ f << N_("bottom_crop ") << _crop.bottom << N_("\n");
for (vector<Filter const *>::const_iterator i = _filters.begin(); i != _filters.end(); ++i) {
- f << "filter " << (*i)->id () << "\n";
+ f << N_("filter ") << (*i)->id () << N_("\n");
}
- f << "scaler " << _scaler->id () << "\n";
- f << "trim_start " << _trim_start << "\n";
- f << "trim_end " << _trim_end << "\n";
- f << "dcp_ab " << (_dcp_ab ? "1" : "0") << "\n";
+ f << N_("scaler ") << _scaler->id () << N_("\n");
+ f << N_("trim_start ") << _trim_start << N_("\n");
+ f << N_("trim_end ") << _trim_end << N_("\n");
+ f << N_("dcp_ab ") << (_dcp_ab ? N_("1") : N_("0")) << N_("\n");
if (_content_audio_stream) {
- f << "selected_content_audio_stream " << _content_audio_stream->to_string() << "\n";
+ f << N_("selected_content_audio_stream ") << _content_audio_stream->to_string() << N_("\n");
}
for (vector<string>::const_iterator i = _external_audio.begin(); i != _external_audio.end(); ++i) {
- f << "external_audio " << *i << "\n";
+ f << N_("external_audio ") << *i << N_("\n");
}
- f << "use_content_audio " << (_use_content_audio ? "1" : "0") << "\n";
- f << "audio_gain " << _audio_gain << "\n";
- f << "audio_delay " << _audio_delay << "\n";
- f << "still_duration " << _still_duration << "\n";
+ f << N_("use_content_audio ") << (_use_content_audio ? N_("1") : N_("0")) << N_("\n");
+ f << N_("audio_gain ") << _audio_gain << N_("\n");
+ f << N_("audio_delay ") << _audio_delay << N_("\n");
+ f << N_("still_duration ") << _still_duration << N_("\n");
if (_subtitle_stream) {
- f << "selected_subtitle_stream " << _subtitle_stream->to_string() << "\n";
+ f << N_("selected_subtitle_stream ") << _subtitle_stream->to_string() << N_("\n");
}
- f << "with_subtitles " << _with_subtitles << "\n";
- f << "subtitle_offset " << _subtitle_offset << "\n";
- f << "subtitle_scale " << _subtitle_scale << "\n";
- f << "colour_lut " << _colour_lut << "\n";
- f << "j2k_bandwidth " << _j2k_bandwidth << "\n";
+ f << N_("with_subtitles ") << _with_subtitles << N_("\n");
+ f << N_("subtitle_offset ") << _subtitle_offset << N_("\n");
+ f << N_("subtitle_scale ") << _subtitle_scale << N_("\n");
+ f << N_("colour_lut ") << _colour_lut << N_("\n");
+ f << N_("j2k_bandwidth ") << _j2k_bandwidth << N_("\n");
_dci_metadata.write (f);
- f << "dci_date " << boost::gregorian::to_iso_string (_dci_date) << "\n";
- f << "width " << _size.width << "\n";
- f << "height " << _size.height << "\n";
- f << "length " << _length.get_value_or(0) << "\n";
- f << "dcp_intrinsic_duration " << _dcp_intrinsic_duration.get_value_or(0) << "\n";
- f << "content_digest " << _content_digest << "\n";
+ f << N_("dci_date ") << boost::gregorian::to_iso_string (_dci_date) << N_("\n");
+ f << N_("width ") << _size.width << N_("\n");
+ f << N_("height ") << _size.height << N_("\n");
+ f << N_("length ") << _length.get_value_or(0) << N_("\n");
+ f << N_("dcp_intrinsic_duration ") << _dcp_intrinsic_duration.get_value_or(0) << N_("\n");
+ f << N_("content_digest ") << _content_digest << N_("\n");
for (vector<shared_ptr<AudioStream> >::const_iterator i = _content_audio_streams.begin(); i != _content_audio_streams.end(); ++i) {
- f << "content_audio_stream " << (*i)->to_string () << "\n";
+ f << N_("content_audio_stream ") << (*i)->to_string () << N_("\n");
}
- f << "external_audio_stream " << _external_audio_stream->to_string() << "\n";
+ f << N_("external_audio_stream ") << _external_audio_stream->to_string() << N_("\n");
for (vector<shared_ptr<SubtitleStream> >::const_iterator i = _subtitle_streams.begin(); i != _subtitle_streams.end(); ++i) {
- f << "subtitle_stream " << (*i)->to_string () << "\n";
+ f << N_("subtitle_stream ") << (*i)->to_string () << N_("\n");
}
- f << "frames_per_second " << _frames_per_second << "\n";
+ f << N_("frames_per_second ") << _frames_per_second << N_("\n");
_dirty = false;
}
boost::optional<int> audio_stream_index;
boost::optional<int> subtitle_stream_index;
- ifstream f (file ("metadata").c_str());
+ ifstream f (file (N_("metadata")).c_str());
if (!f.good()) {
- throw OpenFileError (file ("metadata"));
+ throw OpenFileError (file (N_("metadata")));
}
multimap<string, string> kv = read_key_value (f);
/* We need version before anything else */
- multimap<string, string>::iterator v = kv.find ("version");
+ multimap<string, string>::iterator v = kv.find (N_("version"));
if (v != kv.end ()) {
version = atoi (v->second.c_str());
}
string const k = i->first;
string const v = i->second;
- if (k == "audio_sample_rate") {
+ if (k == N_("audio_sample_rate")) {
audio_sample_rate = atoi (v.c_str());
}
/* User-specified stuff */
- if (k == "name") {
+ if (k == N_("name")) {
_name = v;
- } else if (k == "use_dci_name") {
- _use_dci_name = (v == "1");
- } else if (k == "content") {
+ } else if (k == N_("use_dci_name")) {
+ _use_dci_name = (v == N_("1"));
+ } else if (k == N_("content")) {
_content = v;
- } else if (k == "trust_content_header") {
- _trust_content_header = (v == "1");
- } else if (k == "dcp_content_type") {
+ } else if (k == N_("trust_content_header")) {
+ _trust_content_header = (v == N_("1"));
+ } else if (k == N_("dcp_content_type")) {
if (version < 3) {
_dcp_content_type = DCPContentType::from_pretty_name (v);
} else {
_dcp_content_type = DCPContentType::from_dci_name (v);
}
- } else if (k == "format") {
+ } else if (k == N_("format")) {
_format = Format::from_metadata (v);
- } else if (k == "left_crop") {
+ } else if (k == N_("left_crop")) {
_crop.left = atoi (v.c_str ());
- } else if (k == "right_crop") {
+ } else if (k == N_("right_crop")) {
_crop.right = atoi (v.c_str ());
- } else if (k == "top_crop") {
+ } else if (k == N_("top_crop")) {
_crop.top = atoi (v.c_str ());
- } else if (k == "bottom_crop") {
+ } else if (k == N_("bottom_crop")) {
_crop.bottom = atoi (v.c_str ());
- } else if (k == "filter") {
+ } else if (k == N_("filter")) {
_filters.push_back (Filter::from_id (v));
- } else if (k == "scaler") {
+ } else if (k == N_("scaler")) {
_scaler = Scaler::from_id (v);
- } else if ( ((!version || version < 2) && k == "dcp_trim_start") || k == "trim_start") {
+ } else if ( ((!version || version < 2) && k == N_("dcp_trim_start")) || k == N_("trim_start")) {
_trim_start = atoi (v.c_str ());
- } else if ( ((!version || version < 2) && k == "dcp_trim_end") || k == "trim_end") {
+ } else if ( ((!version || version < 2) && k == N_("dcp_trim_end")) || k == N_("trim_end")) {
_trim_end = atoi (v.c_str ());
- } else if (k == "dcp_ab") {
- _dcp_ab = (v == "1");
- } else if (k == "selected_content_audio_stream" || (!version && k == "selected_audio_stream")) {
+ } else if (k == N_("dcp_ab")) {
+ _dcp_ab = (v == N_("1"));
+ } else if (k == N_("selected_content_audio_stream") || (!version && k == N_("selected_audio_stream"))) {
if (!version) {
audio_stream_index = atoi (v.c_str ());
} else {
_content_audio_stream = audio_stream_factory (v, version);
}
- } else if (k == "external_audio") {
+ } else if (k == N_("external_audio")) {
_external_audio.push_back (v);
- } else if (k == "use_content_audio") {
- _use_content_audio = (v == "1");
- } else if (k == "audio_gain") {
+ } else if (k == N_("use_content_audio")) {
+ _use_content_audio = (v == N_("1"));
+ } else if (k == N_("audio_gain")) {
_audio_gain = atof (v.c_str ());
- } else if (k == "audio_delay") {
+ } else if (k == N_("audio_delay")) {
_audio_delay = atoi (v.c_str ());
- } else if (k == "still_duration") {
+ } else if (k == N_("still_duration")) {
_still_duration = atoi (v.c_str ());
- } else if (k == "selected_subtitle_stream") {
+ } else if (k == N_("selected_subtitle_stream")) {
if (!version) {
subtitle_stream_index = atoi (v.c_str ());
} else {
_subtitle_stream = subtitle_stream_factory (v, version);
}
- } else if (k == "with_subtitles") {
- _with_subtitles = (v == "1");
- } else if (k == "subtitle_offset") {
+ } else if (k == N_("with_subtitles")) {
+ _with_subtitles = (v == N_("1"));
+ } else if (k == N_("subtitle_offset")) {
_subtitle_offset = atoi (v.c_str ());
- } else if (k == "subtitle_scale") {
+ } else if (k == N_("subtitle_scale")) {
_subtitle_scale = atof (v.c_str ());
- } else if (k == "colour_lut") {
+ } else if (k == N_("colour_lut")) {
_colour_lut = atoi (v.c_str ());
- } else if (k == "j2k_bandwidth") {
+ } else if (k == N_("j2k_bandwidth")) {
_j2k_bandwidth = atoi (v.c_str ());
- } else if (k == "dci_date") {
+ } else if (k == N_("dci_date")) {
_dci_date = boost::gregorian::from_undelimited_string (v);
}
_dci_metadata.read (k, v);
/* Cached stuff */
- if (k == "width") {
+ if (k == N_("width")) {
_size.width = atoi (v.c_str ());
- } else if (k == "height") {
+ } else if (k == N_("height")) {
_size.height = atoi (v.c_str ());
- } else if (k == "length") {
+ } else if (k == N_("length")) {
int const vv = atoi (v.c_str ());
if (vv) {
_length = vv;
}
- } else if (k == "dcp_intrinsic_duration") {
+ } else if (k == N_("dcp_intrinsic_duration")) {
int const vv = atoi (v.c_str ());
if (vv) {
_dcp_intrinsic_duration = vv;
}
- } else if (k == "content_digest") {
+ } else if (k == N_("content_digest")) {
_content_digest = v;
- } else if (k == "content_audio_stream" || (!version && k == "audio_stream")) {
+ } else if (k == N_("content_audio_stream") || (!version && k == N_("audio_stream"))) {
_content_audio_streams.push_back (audio_stream_factory (v, version));
- } else if (k == "external_audio_stream") {
+ } else if (k == N_("external_audio_stream")) {
_external_audio_stream = audio_stream_factory (v, version);
- } else if (k == "subtitle_stream") {
+ } else if (k == N_("subtitle_stream")) {
_subtitle_streams.push_back (subtitle_stream_factory (v, version));
- } else if (k == "frames_per_second") {
+ } else if (k == N_("frames_per_second")) {
_frames_per_second = atof (v.c_str ());
}
}
fixed_name = fixed_name.substr (0, 14);
}
- d << fixed_name << "_";
+ d << fixed_name << N_("_");
if (dcp_content_type()) {
- d << dcp_content_type()->dci_name() << "_";
+ d << dcp_content_type()->dci_name() << N_("_");
}
if (format()) {
- d << format()->dci_name() << "_";
+ d << format()->dci_name() << N_("_");
}
DCIMetadata const dm = dci_metadata ();
if (!dm.audio_language.empty ()) {
d << dm.audio_language;
if (!dm.subtitle_language.empty() && with_subtitles()) {
- d << "-" << dm.subtitle_language;
+ d << N_("-") << dm.subtitle_language;
} else {
- d << "-XX";
+ d << N_("-XX");
}
- d << "_";
+ d << N_("_");
}
if (!dm.territory.empty ()) {
d << dm.territory;
if (!dm.rating.empty ()) {
- d << "-" << dm.rating;
+ d << N_("-") << dm.rating;
}
- d << "_";
+ d << N_("_");
}
switch (audio_channels()) {
case 1:
- d << "10_";
+ d << N_("10_");
break;
case 2:
- d << "20_";
+ d << N_("20_");
break;
case 6:
- d << "51_";
+ d << N_("51_");
break;
case 8:
- d << "71_";
+ d << N_("71_");
break;
}
- d << "2K_";
+ d << N_("2K_");
if (!dm.studio.empty ()) {
- d << dm.studio << "_";
+ d << dm.studio << N_("_");
}
if (if_created_now) {
- d << boost::gregorian::to_iso_string (boost::gregorian::day_clock::local_day ()) << "_";
+ d << boost::gregorian::to_iso_string (boost::gregorian::day_clock::local_day ()) << N_("_");
} else {
- d << boost::gregorian::to_iso_string (_dci_date) << "_";
+ d << boost::gregorian::to_iso_string (_dci_date) << N_("_");
}
if (!dm.facility.empty ()) {
- d << dm.facility << "_";
+ d << dm.facility << N_("_");
}
if (!dm.package_type.empty ()) {
{
string check = directory ();
- boost::filesystem::path slash ("/");
+ boost::filesystem::path slash (N_("/"));
string platform_slash = slash.make_preferred().string ();
if (!ends_with (check, platform_slash)) {
/* Default format */
switch (content_type()) {
case STILL:
- set_format (Format::from_id ("var-185"));
+ set_format (Format::from_id (N_("var-185")));
break;
case VIDEO:
- set_format (Format::from_id ("185"));
+ set_format (Format::from_id (N_("185")));
break;
}
stringstream s;
s.width (8);
- s << setfill('0') << f << ".md5";
+ s << setfill('0') << f << N_(".md5");
p /= s.str();
Film::j2c_path (int f, bool t) const
{
boost::filesystem::path p;
- p /= "j2c";
+ p /= N_("j2c");
p /= video_state_identifier ();
stringstream s;
s.width (8);
- s << setfill('0') << f << ".j2c";
+ s << setfill('0') << f << N_(".j2c");
if (t) {
- s << ".tmp";
+ s << N_(".tmp");
}
p /= s.str();
#include <libpostproc/postprocess.h>
}
+#include "i18n.h"
+
using namespace std;
vector<Filter const *> Filter::_filters;
{
/* Note: "none" is a magic id name, so don't use it here */
- maybe_add ("pphb", "Horizontal deblocking filter", "De-blocking", "", "hb");
- maybe_add ("ppvb", "Vertical deblocking filter", "De-blocking", "", "vb");
- maybe_add ("ppha", "Horizontal deblocking filter A", "De-blocking", "", "ha");
- maybe_add ("ppva", "Vertical deblocking filter A", "De-blocking", "", "va");
- maybe_add ("pph1", "Experimental horizontal deblocking filter 1", "De-blocking", "", "h1");
- maybe_add ("pphv", "Experimental vertical deblocking filter 1", "De-blocking", "", "v1");
- maybe_add ("ppdr", "Deringing filter", "Misc", "", "dr");
- maybe_add ("pplb", "Linear blend deinterlacer", "De-interlacing", "", "lb");
- maybe_add ("ppli", "Linear interpolating deinterlacer", "De-interlacing", "", "li");
- maybe_add ("ppci", "Cubic interpolating deinterlacer", "De-interlacing", "", "ci");
- maybe_add ("ppmd", "Median deinterlacer", "De-interlacing", "", "md");
- maybe_add ("ppfd", "FFMPEG deinterlacer", "De-interlacing", "", "fd");
- maybe_add ("ppl5", "FIR low-pass deinterlacer", "De-interlacing", "", "l5");
- maybe_add ("mcdeint", "Motion compensating deinterlacer", "De-interlacing", "mcdeint", "");
- maybe_add ("kerndeint", "Kernel deinterlacer", "De-interlacing", "kerndeint", "");
- maybe_add ("yadif", "Yet Another Deinterlacing Filter", "De-interlacing", "yadif", "");
- maybe_add ("pptn", "Temporal noise reducer", "Noise reduction", "", "tn");
- maybe_add ("ppfq", "Force quantizer", "Misc", "", "fq");
- maybe_add ("gradfun", "Gradient debander", "Misc", "gradfun", "");
- maybe_add ("unsharp", "Unsharp mask and Gaussian blur", "Misc", "unsharp", "");
- maybe_add ("denoise3d", "3D denoiser", "Noise reduction", "denoise3d", "");
- maybe_add ("hqdn3d", "High quality 3D denoiser", "Noise reduction", "hqdn3d", "");
- maybe_add ("telecine", "Telecine filter", "Misc", "telecine", "");
- maybe_add ("ow", "Overcomplete wavelet denoiser", "Noise reduction", "mp=ow", "");
+ maybe_add (N_("pphb"), _("Horizontal deblocking filter"), _("De-blocking"), N_(""), N_("hb"));
+ maybe_add (N_("ppvb"), _("Vertical deblocking filter"), _("De-blocking"), N_(""), N_("vb"));
+ maybe_add (N_("ppha"), _("Horizontal deblocking filter A"), _("De-blocking"), N_(""), N_("ha"));
+ maybe_add (N_("ppva"), _("Vertical deblocking filter A"), _("De-blocking"), N_(""), N_("va"));
+ maybe_add (N_("pph1"), _("Experimental horizontal deblocking filter 1"), _("De-blocking"), N_(""), N_("h1"));
+ maybe_add (N_("pphv"), _("Experimental vertical deblocking filter 1"), _("De-blocking"), N_(""), N_("v1"));
+ maybe_add (N_("ppdr"), _("Deringing filter"), _("Misc"), N_(""), N_("dr"));
+ maybe_add (N_("pplb"), _("Linear blend deinterlacer"), _("De-interlacing"), N_(""), N_("lb"));
+ maybe_add (N_("ppli"), _("Linear interpolating deinterlacer"), _("De-interlacing"), N_(""), N_("li"));
+ maybe_add (N_("ppci"), _("Cubic interpolating deinterlacer"), _("De-interlacing"), N_(""), N_("ci"));
+ maybe_add (N_("ppmd"), _("Median deinterlacer"), _("De-interlacing"), N_(""), N_("md"));
+ maybe_add (N_("ppfd"), _("FFMPEG deinterlacer"), _("De-interlacing"), N_(""), N_("fd"));
+ maybe_add (N_("ppl5"), _("FIR low-pass deinterlacer"), _("De-interlacing"), N_(""), N_("l5"));
+ maybe_add (N_("mcdeint"), _("Motion compensating deinterlacer"), _("De-interlacing"), N_("mcdeint"), N_(""));
+ maybe_add (N_("kerndeint"), _("Kernel deinterlacer"), _("De-interlacing"), N_("kerndeint"), N_(""));
+ maybe_add (N_("yadif"), _("Yet Another Deinterlacing Filter"), _("De-interlacing"), N_("yadif"), N_(""));
+ maybe_add (N_("pptn"), _("Temporal noise reducer"), _("Noise reduction"), N_(""), N_("tn"));
+ maybe_add (N_("ppfq"), _("Force quantizer"), _("Misc"), N_(""), N_("fq"));
+ maybe_add (N_("gradfun"), _("Gradient debander"), _("Misc"), N_("gradfun"), N_(""));
+ maybe_add (N_("unsharp"), _("Unsharp mask and Gaussian blur"), _("Misc"), N_("unsharp"), N_(""));
+ maybe_add (N_("denoise3d"), _("3D denoiser"), _("Noise reduction"), N_("denoise3d"), N_(""));
+ maybe_add (N_("hqdn3d"), _("High quality 3D denoiser"), _("Noise reduction"), N_("hqdn3d"), N_(""));
+ maybe_add (N_("telecine"), _("Telecine filter"), _("Misc"), N_("telecine"), N_(""));
+ maybe_add (N_("ow"), _("Overcomplete wavelet denoiser"), _("Noise reduction"), N_("mp=ow"), N_(""));
}
void
for (vector<Filter const *>::const_iterator i = filters.begin(); i != filters.end(); ++i) {
if (!(*i)->vf().empty ()) {
if (!vf.empty ()) {
- vf += ",";
+ vf += N_(",");
}
vf += (*i)->vf ();
}
if (!(*i)->pp().empty ()) {
if (!pp.empty()) {
- pp += ",";
+ pp += N_(",");
}
pp += (*i)->pp ();
}
#include "film.h"
#include "ffmpeg_decoder.h"
+#include "i18n.h"
+
using std::stringstream;
using std::string;
using std::list;
{
string filters = Filter::ffmpeg_strings (film->filters()).first;
if (!filters.empty ()) {
- filters += ",";
+ filters += N_(",");
}
filters += crop_string (Position (film->crop().left, film->crop().top), film->cropped_size (decoder->native_size()));
AVFilterGraph* graph = avfilter_graph_alloc();
if (graph == 0) {
- throw DecodeError ("Could not create filter graph.");
+ throw DecodeError (N_("could not create filter graph."));
}
- AVFilter* buffer_src = avfilter_get_by_name("buffer");
+ AVFilter* buffer_src = avfilter_get_by_name(N_("buffer"));
if (buffer_src == 0) {
- throw DecodeError ("Could not find buffer src filter");
+ throw DecodeError (N_("could not find buffer src filter"));
}
AVFilter* buffer_sink = get_sink ();
stringstream a;
- a << _size.width << ":"
- << _size.height << ":"
- << _pixel_format << ":"
- << decoder->time_base_numerator() << ":"
- << decoder->time_base_denominator() << ":"
- << decoder->sample_aspect_ratio_numerator() << ":"
+ a << _size.width << N_(":")
+ << _size.height << N_(":")
+ << _pixel_format << N_(":")
+ << decoder->time_base_numerator() << N_(":")
+ << decoder->time_base_denominator() << N_(":")
+ << decoder->sample_aspect_ratio_numerator() << N_(":")
<< decoder->sample_aspect_ratio_denominator();
int r;
- if ((r = avfilter_graph_create_filter (&_buffer_src_context, buffer_src, "in", a.str().c_str(), 0, graph)) < 0) {
- throw DecodeError ("could not create buffer source");
+ if ((r = avfilter_graph_create_filter (&_buffer_src_context, buffer_src, N_("in"), a.str().c_str(), 0, graph)) < 0) {
+ throw DecodeError (N_("could not create buffer source"));
}
AVBufferSinkParams* sink_params = av_buffersink_params_alloc ();
pixel_fmts[1] = PIX_FMT_NONE;
sink_params->pixel_fmts = pixel_fmts;
- if (avfilter_graph_create_filter (&_buffer_sink_context, buffer_sink, "out", 0, sink_params, graph) < 0) {
- throw DecodeError ("could not create buffer sink.");
+ if (avfilter_graph_create_filter (&_buffer_sink_context, buffer_sink, N_("out"), 0, sink_params, graph) < 0) {
+ throw DecodeError (N_("could not create buffer sink."));
}
AVFilterInOut* outputs = avfilter_inout_alloc ();
- outputs->name = av_strdup("in");
+ outputs->name = av_strdup(N_("in"));
outputs->filter_ctx = _buffer_src_context;
outputs->pad_idx = 0;
outputs->next = 0;
AVFilterInOut* inputs = avfilter_inout_alloc ();
- inputs->name = av_strdup("out");
+ inputs->name = av_strdup(N_("out"));
inputs->filter_ctx = _buffer_sink_context;
inputs->pad_idx = 0;
inputs->next = 0;
#if LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR == 15
if (avfilter_graph_parse (graph, filters.c_str(), inputs, outputs, 0) < 0) {
- throw DecodeError ("could not set up filter graph.");
+ throw DecodeError (N_("could not set up filter graph."));
}
#else
if (avfilter_graph_parse (graph, filters.c_str(), &inputs, &outputs, 0) < 0) {
- throw DecodeError ("could not set up filter graph.");
+ throw DecodeError (N_("could not set up filter graph."));
}
#endif
if (avfilter_graph_config (graph, 0) < 0) {
- throw DecodeError ("could not configure filter graph.");
+ throw DecodeError (N_("could not configure filter graph."));
}
/* XXX: leaking `inputs' / `outputs' ? */
#if LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR >= 53 && LIBAVFILTER_VERSION_MINOR <= 61
if (av_vsrc_buffer_add_frame (_buffer_src_context, frame, 0) < 0) {
- throw DecodeError ("could not push buffer into filter chain.");
+ throw DecodeError (N_("could not push buffer into filter chain."));
}
#elif LIBAVFILTER_VERSION_MAJOR == 2 && LIBAVFILTER_VERSION_MINOR == 15
par.den = sample_aspect_ratio_denominator ();
if (av_vsrc_buffer_add_frame (_buffer_src_context, frame, 0, par) < 0) {
- throw DecodeError ("could not push buffer into filter chain.");
+ throw DecodeError (N_("could not push buffer into filter chain."));
}
#else
if (av_buffersrc_write_frame (_buffer_src_context, frame) < 0) {
- throw DecodeError ("could not push buffer into filter chain.");
+ throw DecodeError (N_("could not push buffer into filter chain."));
}
#endif
int r = avfilter_request_frame (_buffer_sink_context->inputs[0]);
if (r < 0) {
- throw DecodeError ("could not request filtered frame");
+ throw DecodeError (N_("could not request filtered frame"));
}
AVFilterBufferRef* filter_buffer = _buffer_sink_context->inputs[0]->cur_buf;
#include "format.h"
#include "film.h"
+#include "i18n.h"
+
using std::string;
using std::setprecision;
using std::stringstream;
{
stringstream s;
if (!_nickname.empty ()) {
- s << _nickname << " (";
+ s << _nickname << N_(" (");
}
- s << setprecision(3) << (_ratio / 100.0) << ":1";
+ s << setprecision(3) << (_ratio / 100.0) << N_(":1");
if (!_nickname.empty ()) {
- s << ")";
+ s << N_(")");
}
return s.str ();
void
Format::setup_formats ()
{
- _formats.push_back (new FixedFormat (119, libdcp::Size (1285, 1080), "119", "1.19", "F"));
- _formats.push_back (new FixedFormat (133, libdcp::Size (1436, 1080), "133", "1.33", "F"));
- _formats.push_back (new FixedFormat (138, libdcp::Size (1485, 1080), "138", "1.375", "F"));
- _formats.push_back (new FixedFormat (133, libdcp::Size (1998, 1080), "133-in-flat", "4:3 within Flat", "F"));
- _formats.push_back (new FixedFormat (137, libdcp::Size (1480, 1080), "137", "Academy", "F"));
- _formats.push_back (new FixedFormat (166, libdcp::Size (1793, 1080), "166", "1.66", "F"));
- _formats.push_back (new FixedFormat (166, libdcp::Size (1998, 1080), "166-in-flat", "1.66 within Flat", "F"));
- _formats.push_back (new FixedFormat (178, libdcp::Size (1998, 1080), "178-in-flat", "16:9 within Flat", "F"));
- _formats.push_back (new FixedFormat (178, libdcp::Size (1920, 1080), "178", "16:9", "F"));
- _formats.push_back (new FixedFormat (185, libdcp::Size (1998, 1080), "185", "Flat", "F"));
- _formats.push_back (new FixedFormat (239, libdcp::Size (2048, 858), "239", "Scope", "S"));
- _formats.push_back (new VariableFormat (libdcp::Size (1998, 1080), "var-185", "Flat", "F"));
- _formats.push_back (new VariableFormat (libdcp::Size (2048, 858), "var-239", "Scope", "S"));
+ /// TRANSLATORS: these are film picture aspect ratios; "Academy" means 1.37, "Flat" 1.85 and "Scope" 2.39.
+ _formats.push_back (new FixedFormat (119, libdcp::Size (1285, 1080), N_("119"), _("1.19"), N_("F")));
+ _formats.push_back (new FixedFormat (133, libdcp::Size (1436, 1080), N_("133"), _("1.33"), N_("F")));
+ _formats.push_back (new FixedFormat (138, libdcp::Size (1485, 1080), N_("138"), _("1.375"), N_("F")));
+ _formats.push_back (new FixedFormat (133, libdcp::Size (1998, 1080), N_("133-in-flat"), _("4:3 within Flat"), N_("F")));
+ _formats.push_back (new FixedFormat (137, libdcp::Size (1480, 1080), N_("137"), _("Academy"), N_("F")));
+ _formats.push_back (new FixedFormat (166, libdcp::Size (1793, 1080), N_("166"), _("1.66"), N_("F")));
+ _formats.push_back (new FixedFormat (166, libdcp::Size (1998, 1080), N_("166-in-flat"), _("1.66 within Flat"), N_("F")));
+ _formats.push_back (new FixedFormat (178, libdcp::Size (1998, 1080), N_("178-in-flat"), _("16:9 within Flat"), N_("F")));
+ _formats.push_back (new FixedFormat (178, libdcp::Size (1920, 1080), N_("178"), _("16:9"), N_("F")));
+ _formats.push_back (new FixedFormat (185, libdcp::Size (1998, 1080), N_("185"), _("Flat"), N_("F")));
+ _formats.push_back (new FixedFormat (239, libdcp::Size (2048, 858), N_("239"), _("Scope"), N_("S")));
+ _formats.push_back (new VariableFormat (libdcp::Size (1998, 1080), N_("var-185"), _("Flat"), N_("F")));
+ _formats.push_back (new VariableFormat (libdcp::Size (2048, 858), N_("var-239"), _("Scope"), N_("S")));
}
/** @param n Nickname.
#include "exceptions.h"
#include "scaler.h"
+#include "i18n.h"
+
using namespace std;
using namespace boost;
using libdcp::Size;
case PIX_FMT_YUV444P10LE:
return size().height;
default:
- throw PixelFormatError ("lines()", _pixel_format);
+ throw PixelFormatError (N_("lines()"), _pixel_format);
}
return 0;
case PIX_FMT_RGBA:
return 1;
default:
- throw PixelFormatError ("components()", _pixel_format);
+ throw PixelFormatError (N_("components()"), _pixel_format);
}
return 0;
case PIX_FMT_YUV444P10LE:
pp_format = PP_FORMAT_444;
default:
- throw PixelFormatError ("post_process", pixel_format());
+ throw PixelFormatError (N_("post_process"), pixel_format());
}
pp_mode* mode = pp_get_mode_by_name_and_quality (pp.c_str (), PP_QUALITY_MAX);
#include "film.h"
#include "exceptions.h"
+#include "i18n.h"
+
using std::cout;
using boost::shared_ptr;
using libdcp::Size;
ImageMagickDecoder::native_size () const
{
if (_files.empty ()) {
- throw DecodeError ("no still image files found");
+ throw DecodeError (_("no still image files found"));
}
/* Look at the first file and assume its size holds for all */
#include "job.h"
#include "util.h"
+#include "i18n.h"
+
using std::string;
using std::list;
using std::stringstream;
set_progress (1);
set_state (FINISHED_ERROR);
- string m = String::compose ("An error occurred whilst handling the file %1.", boost::filesystem::path (e.filename()).leaf());
+ string m = String::compose (_("An error occurred whilst handling the file %1."), boost::filesystem::path (e.filename()).leaf());
boost::filesystem::space_info const s = boost::filesystem::space (e.filename());
if (s.available < pow (1024, 3)) {
- m += "\n\nThe drive that the film is stored on is low in disc space. Free some more space and try again.";
+ m += N_("\n\n");
+ m += _("The drive that the film is stored on is low in disc space. Free some more space and try again.");
}
set_error (e.what(), m);
set_state (FINISHED_ERROR);
set_error (
e.what (),
- "It is not known what caused this error. The best idea is to report the problem to the DVD-o-matic mailing list (dvdomatic@carlh.net)"
+ _("It is not known what caused this error. The best idea is to report the problem to the DVD-o-matic mailing list (dvdomatic@carlh.net)")
);
} catch (...) {
set_progress (1);
set_state (FINISHED_ERROR);
set_error (
- "Unknown error",
- "It is not known what caused this error. The best idea is to report the problem to the DVD-o-matic mailing list (dvdomatic@carlh.net)"
+ _("Unknown error"),
+ _("It is not known what caused this error. The best idea is to report the problem to the DVD-o-matic mailing list (dvdomatic@carlh.net)")
);
}
stringstream s;
if (!finished ()) {
- s << pc << "%";
+ s << pc << N_("%");
if (p >= 0 && t > 10 && r > 0) {
- s << "; " << seconds_to_approximate_hms (r) << " remaining";
+ /// TRANSLATORS: %1 is an amount of time, e.g. "about 2 minutes", that is
+ /// remaining before an operation finishes.
+ s << N_("; ") << String::compose (_("%1 remaining"), seconds_to_approximate_hms (r));
}
} else if (finished_ok ()) {
- s << "OK (ran for " << seconds_to_hms (_ran_for) << ")";
+ s << String::compose (_("OK (ran for %1)"), seconds_to_hms (_ran_for));
} else if (finished_in_error ()) {
- s << "Error (" << error_summary() << ")";
+ s << String::compose (_("Error (%1)"), error_summary());
}
return s.str ();
#include <time.h>
#include "log.h"
+#include "i18n.h"
+
using namespace std;
Log::Log ()
string a = ctime (&t);
stringstream s;
- s << a.substr (0, a.length() - 1) << ": " << m;
+ s << a.substr (0, a.length() - 1) << N_(": ") << m;
do_log (s.str ());
}
gettimeofday (&tv, 0);
stringstream s;
- s << tv.tv_sec << ":" << tv.tv_usec << " " << m;
+ s << tv.tv_sec << N_(":") << tv.tv_usec << N_(" ") << m;
do_log (s.str ());
}
void
Log::set_level (string l)
{
- if (l == "verbose") {
+ if (l == N_("verbose")) {
set_level (VERBOSE);
return;
- } else if (l == "timing") {
+ } else if (l == N_("timing")) {
set_level (TIMING);
return;
}
FileLog::do_log (string m)
{
ofstream f (_file.c_str(), fstream::app);
- f << m << "\n";
+ f << m << N_("\n");
}
#include "image.h"
#include "log.h"
+#include "i18n.h"
+
using std::min;
using boost::shared_ptr;
_log->log (
String::compose (
- "Matching processor has seen %1 video frames (which equals %2 audio frames) and %3 audio frames",
+ N_("Matching processor has seen %1 video frames (which equals %2 audio frames) and %3 audio frames"),
_video_frames,
video_frames_to_audio_frames (_video_frames, _sample_rate, _frames_per_second),
_audio_frames
if (audio_short_by_frames < 0) {
- _log->log (String::compose ("%1 too many audio frames", -audio_short_by_frames));
+ _log->log (String::compose (N_("%1 too many audio frames"), -audio_short_by_frames));
/* We have seen more audio than video. Emit enough black video frames so that we reverse this */
int const black_video_frames = ceil (-audio_short_by_frames * _frames_per_second / _sample_rate);
- _log->log (String::compose ("Emitting %1 frames of black video", black_video_frames));
+ _log->log (String::compose (N_("Emitting %1 frames of black video"), black_video_frames));
shared_ptr<Image> black (new SimpleImage (_pixel_format.get(), _size.get(), true));
black->make_black ();
}
if (audio_short_by_frames > 0) {
- _log->log (String::compose ("Emitted %1 too few audio frames", audio_short_by_frames));
+ _log->log (String::compose (N_("Emitted %1 too few audio frames"), audio_short_by_frames));
/* Do things in half second blocks as I think there may be limits
to what FFmpeg (and in particular the resampler) can cope with.
}
#include "scaler.h"
+#include "i18n.h"
+
using namespace std;
vector<Scaler const *> Scaler::_scalers;
void
Scaler::setup_scalers ()
{
- _scalers.push_back (new Scaler (SWS_BICUBIC, "bicubic", "Bicubic"));
- _scalers.push_back (new Scaler (SWS_X, "x", "X"));
- _scalers.push_back (new Scaler (SWS_AREA, "area", "Area"));
- _scalers.push_back (new Scaler (SWS_GAUSS, "gauss", "Gaussian"));
- _scalers.push_back (new Scaler (SWS_LANCZOS, "lanczos", "Lanczos"));
- _scalers.push_back (new Scaler (SWS_SINC, "sinc", "Sinc"));
- _scalers.push_back (new Scaler (SWS_SPLINE, "spline", "Spline"));
- _scalers.push_back (new Scaler (SWS_BILINEAR, "bilinear", "Bilinear"));
- _scalers.push_back (new Scaler (SWS_FAST_BILINEAR, "fastbilinear", "Fast Bilinear"));
+ _scalers.push_back (new Scaler (SWS_BICUBIC, N_("bicubic"), _("Bicubic")));
+ _scalers.push_back (new Scaler (SWS_X, N_("x"), _("X")));
+ _scalers.push_back (new Scaler (SWS_AREA, N_("area"), _("Area")));
+ _scalers.push_back (new Scaler (SWS_GAUSS, N_("gauss"), _("Gaussian")));
+ _scalers.push_back (new Scaler (SWS_LANCZOS, N_("lanczos"), _("Lanczos")));
+ _scalers.push_back (new Scaler (SWS_SINC, N_("sinc"), _("Sinc")));
+ _scalers.push_back (new Scaler (SWS_SPLINE, N_("spline"), _("Spline")));
+ _scalers.push_back (new Scaler (SWS_BILINEAR, N_("bilinear"), _("Bilinear")));
+ _scalers.push_back (new Scaler (SWS_FAST_BILINEAR, N_("fastbilinear"), _("Fast Bilinear")));
}
/** @param id One of our ids.
#include "log.h"
#include "film.h"
+#include "i18n.h"
+
using std::string;
using std::stringstream;
using std::min;
{
session = ssh_new ();
if (session == 0) {
- throw NetworkError ("Could not start SSH session");
+ throw NetworkError (_("Could not start SSH session"));
}
}
{
scp = ssh_scp_new (s, SSH_SCP_WRITE | SSH_SCP_RECURSIVE, Config::instance()->tms_path().c_str ());
if (!scp) {
- throw NetworkError (String::compose ("Could not start SCP session (%1)", ssh_get_error (s)));
+ throw NetworkError (String::compose (_("Could not start SCP session (%1)"), ssh_get_error (s)));
}
}
SCPDCPJob::SCPDCPJob (shared_ptr<Film> f)
: Job (f)
- , _status ("Waiting")
+ , _status (_("Waiting"))
{
}
string
SCPDCPJob::name () const
{
- return "Copy DCP to TMS";
+ return _("Copy DCP to TMS");
}
void
SCPDCPJob::run ()
{
- _film->log()->log ("SCP DCP job starting");
+ _film->log()->log (N_("SCP DCP job starting"));
SSHSession ss;
- set_status ("connecting");
+ set_status (_("connecting"));
ssh_options_set (ss.session, SSH_OPTIONS_HOST, Config::instance()->tms_ip().c_str ());
ssh_options_set (ss.session, SSH_OPTIONS_USER, Config::instance()->tms_user().c_str ());
int r = ss.connect ();
if (r != SSH_OK) {
- throw NetworkError (String::compose ("Could not connect to server %1 (%2)", Config::instance()->tms_ip(), ssh_get_error (ss.session)));
+ throw NetworkError (String::compose (_("Could not connect to server %1 (%2)"), Config::instance()->tms_ip(), ssh_get_error (ss.session)));
}
int const state = ssh_is_server_known (ss.session);
if (state == SSH_SERVER_ERROR) {
- throw NetworkError (String::compose ("SSH error (%1)", ssh_get_error (ss.session)));
+ throw NetworkError (String::compose (_("SSH error (%1)"), ssh_get_error (ss.session)));
}
r = ssh_userauth_password (ss.session, 0, Config::instance()->tms_password().c_str ());
if (r != SSH_AUTH_SUCCESS) {
- throw NetworkError (String::compose ("Failed to authenticate with server (%1)", ssh_get_error (ss.session)));
+ throw NetworkError (String::compose (_("Failed to authenticate with server (%1)"), ssh_get_error (ss.session)));
}
SSHSCP sc (ss.session);
r = ssh_scp_init (sc.scp);
if (r != SSH_OK) {
- throw NetworkError (String::compose ("Could not start SCP session (%1)", ssh_get_error (ss.session)));
+ throw NetworkError (String::compose (_("Could not start SCP session (%1)"), ssh_get_error (ss.session)));
}
r = ssh_scp_push_directory (sc.scp, _film->dcp_name().c_str(), S_IRWXU);
if (r != SSH_OK) {
- throw NetworkError (String::compose ("Could not create remote directory %1 (%2)", _film->dcp_name(), ssh_get_error (ss.session)));
+ throw NetworkError (String::compose (_("Could not create remote directory %1 (%2)"), _film->dcp_name(), ssh_get_error (ss.session)));
}
string const dcp_dir = _film->dir (_film->dcp_name());
string const leaf = boost::filesystem::path(*i).leaf().generic_string ();
- set_status ("copying " + leaf);
+ set_status (String::compose (_("copying %1"), leaf));
boost::uintmax_t to_do = boost::filesystem::file_size (*i);
ssh_scp_push_file (sc.scp, leaf.c_str(), to_do, S_IRUSR | S_IWUSR);
- FILE* f = fopen (boost::filesystem::path (*i).string().c_str(), "rb");
+ FILE* f = fopen (boost::filesystem::path (*i).string().c_str(), N_("rb"));
if (f == 0) {
- throw NetworkError (String::compose ("Could not open %1 to send", *i));
+ throw NetworkError (String::compose (_("Could not open %1 to send"), *i));
}
while (to_do > 0) {
r = ssh_scp_write (sc.scp, buffer, t);
if (r != SSH_OK) {
- throw NetworkError (String::compose ("Could not write to remote file (%1)", ssh_get_error (ss.session)));
+ throw NetworkError (String::compose (_("Could not write to remote file (%1)"), ssh_get_error (ss.session)));
}
to_do -= t;
bytes_transferred += t;
}
set_progress (1);
- set_status ("");
+ set_status (N_(""));
set_state (FINISHED_OK);
}
stringstream s;
s << Job::status ();
if (!_status.empty ()) {
- s << "; " << _status;
+ s << N_("; ") << _status;
}
return s.str ();
}
#include "config.h"
#include "subtitle.h"
+#include "i18n.h"
+
using std::string;
using std::stringstream;
using std::multimap;
ServerDescription::create_from_metadata (string v)
{
vector<string> b;
- split (b, v, is_any_of (" "));
+ split (b, v, is_any_of (N_(" ")));
if (b.size() != 2) {
return 0;
ServerDescription::as_metadata () const
{
stringstream s;
- s << _host_name << " " << _threads;
+ s << _host_name << N_(" ") << _threads;
return s.str ();
}
stringstream s (buffer.get());
multimap<string, string> kv = read_key_value (s);
- if (get_required_string (kv, "encode") != "please") {
+ if (get_required_string (kv, N_("encode")) != N_("please")) {
return -1;
}
- libdcp::Size in_size (get_required_int (kv, "input_width"), get_required_int (kv, "input_height"));
- int pixel_format_int = get_required_int (kv, "input_pixel_format");
- libdcp::Size out_size (get_required_int (kv, "output_width"), get_required_int (kv, "output_height"));
- int padding = get_required_int (kv, "padding");
- int subtitle_offset = get_required_int (kv, "subtitle_offset");
- float subtitle_scale = get_required_float (kv, "subtitle_scale");
- string scaler_id = get_required_string (kv, "scaler");
- int frame = get_required_int (kv, "frame");
- int frames_per_second = get_required_int (kv, "frames_per_second");
- string post_process = get_optional_string (kv, "post_process");
- int colour_lut_index = get_required_int (kv, "colour_lut");
- int j2k_bandwidth = get_required_int (kv, "j2k_bandwidth");
- Position subtitle_position (get_optional_int (kv, "subtitle_x"), get_optional_int (kv, "subtitle_y"));
- libdcp::Size subtitle_size (get_optional_int (kv, "subtitle_width"), get_optional_int (kv, "subtitle_height"));
+ libdcp::Size in_size (get_required_int (kv, N_("input_width")), get_required_int (kv, N_("input_height")));
+ int pixel_format_int = get_required_int (kv, N_("input_pixel_format"));
+ libdcp::Size out_size (get_required_int (kv, N_("output_width")), get_required_int (kv, N_("output_height")));
+ int padding = get_required_int (kv, N_("padding"));
+ int subtitle_offset = get_required_int (kv, N_("subtitle_offset"));
+ float subtitle_scale = get_required_float (kv, N_("subtitle_scale"));
+ string scaler_id = get_required_string (kv, N_("scaler"));
+ int frame = get_required_int (kv, N_("frame"));
+ int frames_per_second = get_required_int (kv, N_("frames_per_second"));
+ string post_process = get_optional_string (kv, N_("post_process"));
+ int colour_lut_index = get_required_int (kv, N_("colour_lut"));
+ int j2k_bandwidth = get_required_int (kv, N_("j2k_bandwidth"));
+ Position subtitle_position (get_optional_int (kv, N_("subtitle_x")), get_optional_int (kv, N_("subtitle_y")));
+ libdcp::Size subtitle_size (get_optional_int (kv, N_("subtitle_width")), get_optional_int (kv, N_("subtitle_height")));
/* This checks that colour_lut_index is within range */
colour_lut_index_to_name (colour_lut_index);
encoded->send (socket);
} catch (std::exception& e) {
_log->log (String::compose (
- "Send failed; frame %1, data size %2, pixel format %3, image size %4x%5, %6 components",
+ N_("Send failed; frame %1, data size %2, pixel format %3, image size %4x%5, %6 components"),
frame, encoded->size(), image->pixel_format(), image->size().width, image->size().height, image->components()
)
);
try {
frame = process (socket);
} catch (std::exception& e) {
- _log->log (String::compose ("Error: %1", e.what()));
+ _log->log (String::compose (N_("Error: %1"), e.what()));
}
socket.reset ();
if (frame >= 0) {
struct timeval end;
gettimeofday (&end, 0);
- _log->log (String::compose ("Encoded frame %1 in %2", frame, seconds (end) - seconds (start)));
+ _log->log (String::compose (N_("Encoded frame %1 in %2"), frame, seconds (end) - seconds (start)));
}
_worker_condition.notify_all ();
void
Server::run (int num_threads)
{
- _log->log (String::compose ("Server starting with %1 threads", num_threads));
+ _log->log (String::compose (N_("Server starting with %1 threads"), num_threads));
for (int i = 0; i < num_threads; ++i) {
_worker_threads.push_back (new thread (bind (&Server::worker_thread, this)));
#include "ffmpeg_decoder.h"
#include "external_audio_decoder.h"
+#include "i18n.h"
+
using std::string;
using std::stringstream;
using boost::shared_ptr;
string
SubtitleStream::to_string () const
{
- return String::compose ("%1 %2", _id, _name);
+ return String::compose (N_("%1 %2"), _id, _name);
}
/** Create a SubtitleStream from a value returned from to_string().
#include "image.h"
#include "exceptions.h"
+#include "i18n.h"
+
using namespace std;
using namespace boost;
using libdcp::Size;
_to = packet_time + (double (sub.end_display_time) / 1e3);
if (sub.num_rects > 1) {
- throw DecodeError ("multi-part subtitles not yet supported");
+ throw DecodeError (_("multi-part subtitles not yet supported"));
}
AVSubtitleRect const * rect = sub.rects[0];
if (rect->type != SUBTITLE_BITMAP) {
- throw DecodeError ("non-bitmap subtitles not yet supported");
+ throw DecodeError (_("non-bitmap subtitles not yet supported"));
}
shared_ptr<Image> image (new SimpleImage (PIX_FMT_RGBA, libdcp::Size (rect->w, rect->h), true));
#include "timer.h"
#include "util.h"
+#include "i18n.h"
+
using namespace std;
/** @param n Name to use when giving output */
{
struct timeval stop;
gettimeofday (&stop, 0);
- cout << "T: " << _name << ": " << (seconds (stop) - seconds (_start)) << "\n";
+ cout << N_("T: ") << _name << N_(": ") << (seconds (stop) - seconds (_start)) << N_("\n");
}
/** @param n Name to use when giving output.
}
- set_state ("");
+ set_state (N_(""));
- cout << _name << ":\n";
+ cout << _name << N_(":\n");
for (map<string, double>::iterator i = _totals.begin(); i != _totals.end(); ++i) {
- cout << "\t" << i->first << " " << i->second << "\n";
+ cout << N_("\t") << i->first << " " << i->second << N_("\n");
}
}
{
try {
- _film->log()->log ("Transcode job starting");
- _film->log()->log (String::compose ("Audio delay is %1ms", _film->audio_delay()));
+ _film->log()->log (N_("Transcode job starting"));
+ _film->log()->log (String::compose (N_("Audio delay is %1ms"), _film->audio_delay()));
_encoder.reset (new Encoder (_film));
Transcoder w (_film, _decode_opt, this, _encoder);
_film->set_dcp_intrinsic_duration (_encoder->video_frames_out ());
- _film->log()->log ("Transcode job completed successfully");
- _film->log()->log (String::compose ("DCP intrinsic duration is %1", _encoder->video_frames_out()));
+ _film->log()->log (N_("Transcode job completed successfully"));
+ _film->log()->log (String::compose (N_("DCP intrinsic duration is %1"), _encoder->video_frames_out()));
} catch (std::exception& e) {
set_progress (1);
set_state (FINISHED_ERROR);
- _film->log()->log (String::compose ("Transcode job failed (%1)", e.what()));
+ _film->log()->log (String::compose (N_("Transcode job failed (%1)"), e.what()));
throw;
}
TranscodeJob::status () const
{
if (!_encoder) {
- return "0%";
+ return _("0%");
}
float const fps = _encoder->current_frames_per_second ();
s << Job::status ();
if (!finished ()) {
- s << "; " << fixed << setprecision (1) << fps << " " << _("frames per second");
+ s << N_("; ") << fixed << setprecision (1) << fps << N_(" ") << _("frames per second");
}
return s.str ();
#include "sound_processor.h"
#include "config.h"
+#include "i18n.h"
+
using namespace std;
using namespace boost;
using libdcp::Size;
m -= (h * 60);
stringstream hms;
- hms << h << ":";
+ hms << h << N_(":");
hms.width (2);
- hms << setfill ('0') << m << ":";
+ hms << setfill ('0') << m << N_(":");
hms.width (2);
hms << setfill ('0') << s;
if (h > 0) {
if (m > 30) {
- ap << (h + 1) << " hours";
+ ap << (h + 1) << N_(" ") << _("hours");
} else {
if (h == 1) {
- ap << "1 hour";
+ ap << N_("1 ") << _("hour");
} else {
- ap << h << " hours";
+ ap << h << N_(" ") << _("hours");
}
}
} else if (m > 0) {
if (m == 1) {
- ap << "1 minute";
+ ap << N_("1 ") << _("minute");
} else {
- ap << m << " minutes";
+ ap << m << N_(" ") << _("minutes");
}
} else {
- ap << s << " seconds";
+ ap << s << N_(" ") << _("seconds");
}
return ap.str ();
static string
demangle (string l)
{
- string::size_type const b = l.find_first_of ("(");
+ string::size_type const b = l.find_first_of (N_("("));
if (b == string::npos) {
return l;
}
- string::size_type const p = l.find_last_of ("+");
+ string::size_type const p = l.find_last_of (N_("+"));
if (p == string::npos) {
return l;
}
if (strings) {
for (i = 0; i < size && (levels == 0 || i < size_t(levels)); i++) {
- out << " " << demangle (strings[i]) << endl;
+ out << N_(" ") << demangle (strings[i]) << endl;
}
free (strings);
ffmpeg_version_to_string (int v)
{
stringstream s;
- s << ((v & 0xff0000) >> 16) << "." << ((v & 0xff00) >> 8) << "." << (v & 0xff);
+ s << ((v & 0xff0000) >> 16) << N_(".") << ((v & 0xff00) >> 8) << N_(".") << (v & 0xff);
return s.str ();
}
dependency_version_summary ()
{
stringstream s;
- s << "libopenjpeg " << opj_version () << ", "
- << "libavcodec " << ffmpeg_version_to_string (avcodec_version()) << ", "
- << "libavfilter " << ffmpeg_version_to_string (avfilter_version()) << ", "
- << "libavformat " << ffmpeg_version_to_string (avformat_version()) << ", "
- << "libavutil " << ffmpeg_version_to_string (avutil_version()) << ", "
- << "libpostproc " << ffmpeg_version_to_string (postproc_version()) << ", "
- << "libswscale " << ffmpeg_version_to_string (swscale_version()) << ", "
- << MagickVersion << ", "
- << "libssh " << ssh_version (0) << ", "
- << "libdcp " << libdcp::version << " git " << libdcp::git_commit;
+ s << N_("libopenjpeg ") << opj_version () << N_(", ")
+ << N_("libavcodec ") << ffmpeg_version_to_string (avcodec_version()) << N_(", ")
+ << N_("libavfilter ") << ffmpeg_version_to_string (avfilter_version()) << N_(", ")
+ << N_("libavformat ") << ffmpeg_version_to_string (avformat_version()) << N_(", ")
+ << N_("libavutil ") << ffmpeg_version_to_string (avutil_version()) << N_(", ")
+ << N_("libpostproc ") << ffmpeg_version_to_string (postproc_version()) << N_(", ")
+ << N_("libswscale ") << ffmpeg_version_to_string (swscale_version()) << N_(", ")
+ << MagickVersion << N_(", ")
+ << N_("libssh ") << ssh_version (0) << N_(", ")
+ << N_("libdcp ") << libdcp::version << N_(" git ") << libdcp::git_commit;
return s.str ();
}
void
dvdomatic_setup ()
{
+ bindtextdomain ("libdvdomatic", LOCALE_DIR);
+
avfilter_register_all ();
Format::setup_formats ();
crop_string (Position start, libdcp::Size size)
{
stringstream s;
- s << "crop=" << size.width << ":" << size.height << ":" << start.x << ":" << start.y;
+ s << N_("crop=") << size.width << N_(":") << size.height << N_(":") << start.x << N_(":") << start.y;
return s.str ();
}
for (string::size_type i = 0; i < s.length(); ++i) {
if (s[i] == ' ' && !in_quotes) {
out.push_back (c);
- c = "";
+ c = N_("");
} else if (s[i] == '"') {
in_quotes = !in_quotes;
} else {
}
if (!best) {
- throw EncodeError ("cannot find a suitable DCP frame rate for this source");
+ throw EncodeError (_("cannot find a suitable DCP frame rate for this source"));
}
frames_per_second = best->dcp;
{
switch (index) {
case 0:
- return "sRGB";
+ return _("sRGB");
case 1:
- return "Rec 709";
+ return _("Rec 709");
}
assert (false);
- return "";
+ return N_("");
}
Socket::Socket (int timeout)
} while (ec == asio::error::would_block);
if (ec || !_socket.is_open ()) {
- throw NetworkError ("connect timed out");
+ throw NetworkError (_("connect timed out"));
}
}
get_required_string (multimap<string, string> const & kv, string k)
{
if (kv.count (k) > 1) {
- throw StringError ("unexpected multiple keys in key-value set");
+ throw StringError (N_("unexpected multiple keys in key-value set"));
}
multimap<string, string>::const_iterator i = kv.find (k);
if (i == kv.end ()) {
- throw StringError (String::compose ("missing key %1 in key-value set", k));
+ throw StringError (String::compose (_("missing key %1 in key-value set"), k));
}
return i->second;
get_optional_string (multimap<string, string> const & kv, string k)
{
if (kv.count (k) > 1) {
- throw StringError ("unexpected multiple keys in key-value set");
+ throw StringError (N_("unexpected multiple keys in key-value set"));
}
multimap<string, string>::const_iterator i = kv.find (k);
if (i == kv.end ()) {
- return "";
+ return N_("");
}
return i->second;
get_optional_int (multimap<string, string> const & kv, string k)
{
if (kv.count (k) > 1) {
- throw StringError ("unexpected multiple keys in key-value set");
+ throw StringError (N_("unexpected multiple keys in key-value set"));
}
multimap<string, string>::const_iterator i = kv.find (k);
transform (ext.begin(), ext.end(), ext.begin(), ::tolower);
- return (ext == ".tif" || ext == ".tiff" || ext == ".jpg" || ext == ".jpeg" || ext == ".png" || ext == ".bmp");
+ return (ext == N_(".tif") || ext == N_(".tiff") || ext == N_(".jpg") || ext == N_(".jpeg") || ext == N_(".png") || ext == N_(".bmp"));
}
/** @return A pair containing CPU model name and the number of processors */
info.second = 0;
#ifdef DVDOMATIC_POSIX
- ifstream f ("/proc/cpuinfo");
+ ifstream f (N_("/proc/cpuinfo"));
while (f.good ()) {
string l;
getline (f, l);
- if (boost::algorithm::starts_with (l, "model name")) {
+ if (boost::algorithm::starts_with (l, N_("model name"))) {
string::size_type const c = l.find (':');
if (c != string::npos) {
info.first = l.substr (c + 2);
}
- } else if (boost::algorithm::starts_with (l, "processor")) {
+ } else if (boost::algorithm::starts_with (l, N_("processor"))) {
++info.second;
}
}
#include "options.h"
#include "job.h"
+#include "i18n.h"
+
using boost::shared_ptr;
using boost::optional;
void
VideoDecoder::signal_video (shared_ptr<Image> image, bool same, shared_ptr<Subtitle> sub)
{
- TIMING ("Decoder emits %1", _video_frame);
+ TIMING (N_("Decoder emits %1"), _video_frame);
Video (image, same, sub);
++_video_frame;
#include "log.h"
#include "dcp_video_frame.h"
+#include "i18n.h"
+
using std::make_pair;
using std::pair;
using std::string;
_sound_asset.reset (
new libdcp::SoundAsset (
_film->dir (_film->dcp_name()),
- "audio.mxf",
+ N_("audio.mxf"),
DCPFrameRate (_film->frames_per_second()).frames_per_second,
dcp_audio_channels (_film->audio_channels()),
dcp_audio_sample_rate (_film->audio_stream()->sample_rate())
break;
}
- TIMING ("writer sleeps with a queue of %1", _queue.size());
+ TIMING (N_("writer sleeps with a queue of %1"), _queue.size());
_condition.wait (lock);
- TIMING ("writer wakes with a queue of %1", _queue.size());
+ TIMING (N_("writer wakes with a queue of %1"), _queue.size());
}
if (_finish && _queue.empty()) {
switch (qi.type) {
case QueueItem::FULL:
{
- _film->log()->log (String::compose ("Writer FULL-writes %1 to MXF", qi.frame));
+ _film->log()->log (String::compose (N_("Writer FULL-writes %1 to MXF"), qi.frame));
if (!qi.encoded) {
qi.encoded.reset (new EncodedData (_film->j2c_path (qi.frame, false)));
}
break;
}
case QueueItem::FAKE:
- _film->log()->log (String::compose ("Writer FAKE-writes %1 to MXF", qi.frame));
+ _film->log()->log (String::compose (N_("Writer FAKE-writes %1 to MXF"), qi.frame));
_picture_asset_writer->fake_write (qi.size);
_last_written.reset ();
++_fake_written;
break;
case QueueItem::REPEAT:
{
- _film->log()->log (String::compose ("Writer REPEAT-writes %1 to MXF", qi.frame));
+ _film->log()->log (String::compose (N_("Writer REPEAT-writes %1 to MXF"), qi.frame));
libdcp::FrameInfo const fin = _picture_asset_writer->write (_last_written->data(), _last_written->size());
_last_written->write_info (_film, qi.frame, fin);
++_repeat_written;
++_pushed_to_disk;
lock.unlock ();
- _film->log()->log (String::compose ("Writer full (awaiting %1); pushes %2 to disk", _last_written_frame + 1, qi.frame));
+ _film->log()->log (String::compose (N_("Writer full (awaiting %1); pushes %2 to disk"), _last_written_frame + 1, qi.frame));
qi.encoded->write (_film, qi.frame);
lock.lock ();
qi.encoded.reset ();
boost::filesystem::path to;
to /= _film->dir (_film->dcp_name());
- to /= "video.mxf";
+ to /= N_("video.mxf");
boost::filesystem::create_hard_link (from, to);
/* And update the asset */
_picture_asset->set_directory (_film->dir (_film->dcp_name ()));
- _picture_asset->set_file_name ("video.mxf");
+ _picture_asset->set_file_name (N_("video.mxf"));
if (_sound_asset) {
_sound_asset->set_entry_point (_film->trim_start ());
dcp.write_xml ();
- _film->log()->log (String::compose ("Wrote %1 FULL, %2 FAKE, %3 REPEAT; %4 pushed to disk", _full_written, _fake_written, _repeat_written, _pushed_to_disk));
+ _film->log()->log (String::compose (N_("Wrote %1 FULL, %2 FAKE, %3 REPEAT; %4 pushed to disk"), _full_written, _fake_written, _repeat_written, _pushed_to_disk));
}
/** Tell the writer that frame `f' should be a repeat of the frame before it */
boost::filesystem::path p;
p /= _film->video_mxf_dir ();
p /= _film->video_mxf_filename ();
- FILE* mxf = fopen (p.string().c_str(), "rb");
+ FILE* mxf = fopen (p.string().c_str(), N_("rb"));
if (!mxf) {
return;
}
string const existing_hash = md5_digest (data.data(), data.size());
if (existing_hash != info.hash) {
- _film->log()->log (String::compose ("Existing frame %1 failed hash check", _first_nonexistant_frame));
+ _film->log()->log (String::compose (N_("Existing frame %1 failed hash check"), _first_nonexistant_frame));
break;
}
- _film->log()->log (String::compose ("Have existing frame %1", _first_nonexistant_frame));
+ _film->log()->log (String::compose (N_("Have existing frame %1"), _first_nonexistant_frame));
++_first_nonexistant_frame;
}
if (wxLocale::IsAvailable (language)) {
locale = new wxLocale (language, wxLOCALE_LOAD_DEFAULT);
-
+
#ifdef __WXGTK__
- locale->AddCatalogLookupPathPrefix (wxT ("/usr"));
- locale->AddCatalogLookupPathPrefix (wxT ("/usr/local"));
- locale->AddCatalogLookupPathPrefix (wxT ("build/src/wx/mo"));
- locale->AddCatalogLookupPathPrefix (wxT ("build/src/tools/mo"));
- wxStandardPaths* paths = (wxStandardPaths*) &wxStandardPaths::Get();
- wxString prefix = paths->GetInstallPrefix();
- locale->AddCatalogLookupPathPrefix (prefix);
+ locale->AddCatalogLookupPathPrefix (wxT (LOCALE_DIR));
#endif
locale->AddCatalog ("libdvdomatic-wx");
_reference_scaler->Connect (wxID_ANY, wxEVT_COMMAND_CHOICE_SELECTED, wxCommandEventHandler (ConfigDialog::reference_scaler_changed), 0, this);
pair<string, string> p = Filter::ffmpeg_strings (config->reference_filters ());
- _reference_filters->SetLabel (std_to_wx (p.first + " " + p.second));
+ _reference_filters->SetLabel (std_to_wx (p.first + N_(" ") + p.second));
_reference_filters_button->Connect (wxID_ANY, wxEVT_COMMAND_BUTTON_CLICKED, wxCommandEventHandler (ConfigDialog::edit_reference_filters_clicked), 0, this);
vector<ServerDescription*> servers = config->servers ();
{
Config::instance()->set_reference_filters (f);
pair<string, string> p = Filter::ffmpeg_strings (Config::instance()->reference_filters ());
- _reference_filters->SetLabel (std_to_wx (p.first + " " + p.second));
+ _reference_filters->SetLabel (std_to_wx (p.first + N_(" ") + p.second));
}
void
_still_duration = new wxSpinCtrl (_film_panel);
still_control (_still_duration);
s->Add (_still_duration, 1, wxEXPAND);
- /* TRANSLATORS: `s' here is an abbreviation for seconds, the unit of time */
+ /// TRANSLATORS: `s' here is an abbreviation for seconds, the unit of time
still_control (add_label_to_sizer (s, _film_panel, _("s")));
grid->Add (s);
}
wxBoxSizer* s = new wxBoxSizer (wxHORIZONTAL);
_audio_delay = new wxSpinCtrl (_audio_panel);
s->Add (video_control (_audio_delay), 1);
- /* TRANSLATORS: this is an abbreviation for milliseconds, the unit of time */
+ /// TRANSLATORS: this is an abbreviation for milliseconds, the unit of time
video_control (add_label_to_sizer (s, _audio_panel, _("ms")));
grid->Add (s);
}
assert (MAX_AUDIO_CHANNELS == 6);
- /* TRANSLATORS: these are the names of audio channels; Lfe (sub) is the low-frequency
- enhancement channel (sub-woofer)./
- */
+ /// TRANSLATORS: these are the names of audio channels; Lfe (sub) is the low-frequency
+ /// enhancement channel (sub-woofer).
wxString const channels[] = {
_("Left"),
_("Right"),
break;
case Film::LENGTH:
if (_film->frames_per_second() > 0 && _film->length()) {
- s << _film->length().get() << " frames; " << seconds_to_hms (_film->length().get() / _film->frames_per_second());
+ s << _film->length().get() << N_(" ") << _("frames") << N_("; ") << seconds_to_hms (_film->length().get() / _film->frames_per_second());
} else if (_film->length()) {
- s << _film->length().get() << " frames";
+ s << _film->length().get() << N_(" ") << _("frames");
}
_length->SetLabel (std_to_wx (s.str ()));
if (_film->length()) {
if (_film) {
FileChanged (_film->directory ());
} else {
- FileChanged ("");
+ FileChanged (N_(""));
}
film_changed (Film::NAME);
} else {
stringstream s;
if (_film->audio_stream()->channels() == 1) {
- s << "1 channel";
+ s << _("1 channel");
} else {
- s << _film->audio_stream()->channels () << " channels";
+ s << _film->audio_stream()->channels () << N_(" ") << _("channels");
}
- s << ", " << _film->audio_stream()->sample_rate() << "Hz";
+ s << N_(", ") << _film->audio_stream()->sample_rate() << _("Hz");
_audio->SetLabel (std_to_wx (s.str ()));
}
}
{
string s = _film->dcp_name (true);
if (s.length() > 28) {
- _dcp_name->SetLabel (std_to_wx (s.substr (0, 28) + "..."));
+ _dcp_name->SetLabel (std_to_wx (s.substr (0, 28) + N_("...")));
_dcp_name->SetToolTip (std_to_wx (s));
} else {
_dcp_name->SetLabel (std_to_wx (s));
: wxPanel (p)
, _panel (new wxPanel (this))
, _slider (new wxSlider (this, wxID_ANY, 0, 0, 4096))
- , _play_button (new wxToggleButton (this, wxID_ANY, wxT ("Play")))
+ , _play_button (new wxToggleButton (this, wxID_ANY, _("Play")))
, _display_frame_x (0)
, _got_frame (false)
, _clear_required (false)
_frames->SetLabel (std_to_wx (lexical_cast<string> (_film->length().get())));
double const disk = ((double) _film->j2k_bandwidth() / 8) * _film->length().get() / (_film->frames_per_second () * 1073741824);
stringstream s;
- s << fixed << setprecision (1) << disk << "Gb";
+ s << fixed << setprecision (1) << disk << _("Gb");
_disk->SetLabel (std_to_wx (s.str ()));
} else {
_frames->SetLabel (_("unknown"));
if (server) {
_server = server;
} else {
- _server = new ServerDescription ("localhost", 1);
+ _server = new ServerDescription (N_("localhost"), 1);
}
wxFlexGridSizer* table = new wxFlexGridSizer (2, 4, 4);
if conf.options.target_windows:
conf.load('winres')
- conf.env.append_value('CXXFLAGS', ['-D__STDC_CONSTANT_MACROS', '-msse', '-mfpmath=sse', '-ffast-math', '-fno-strict-aliasing', '-Wall', '-Wno-attributes', '-Wextra'])
+ conf.env.append_value('CXXFLAGS', ['-D__STDC_CONSTANT_MACROS', '-msse', '-mfpmath=sse', '-ffast-math', '-fno-strict-aliasing',
+ '-Wall', '-Wno-attributes', '-Wextra',
+ '-DLOCALE_DIR="%s/share/locale"' % conf.env.prefix])
if conf.options.target_windows:
conf.env.append_value('CXXFLAGS', ['-DDVDOMATIC_WINDOWS', '-DWIN32_LEAN_AND_MEAN', '-DBOOST_USE_WINDOWS_H', '-DUNICODE'])