+ // Keep these sorted alphabetically!
+ LexerKeyword languageTags[] = {
+ { "activechars", LA_ACTIVECHARS },
+ { "babelname", LA_BABELNAME },
+ { "dateformats", LA_DATEFORMATS },
+ { "encoding", LA_ENCODING },
+ { "end", LA_END },
+ { "fontencoding", LA_FONTENC },
+ { "guiname", LA_GUINAME },
+ { "hasguisupport", LA_HAS_GUI_SUPPORT },
+ { "internalencoding", LA_INTERNAL_ENC },
+ { "langcode", LA_LANG_CODE },
+ { "langvariety", LA_LANG_VARIETY },
+ { "polyglossianame", LA_POLYGLOSSIANAME },
+ { "polyglossiaopts", LA_POLYGLOSSIAOPTS },
+ { "postbabelpreamble", LA_POSTBABELPREAMBLE },
+ { "prebabelpreamble", LA_PREBABELPREAMBLE },
+ { "provides", LA_PROVIDES },
+ { "quotestyle", LA_QUOTESTYLE },
+ { "requires", LA_REQUIRES },
+ { "rtl", LA_RTL },
+ { "wordwrap", LA_WORDWRAP },
+ { "xindyname", LA_XINDYNAME }
+ };
+
+ bool error = false;
+ bool finished = false;
+ lex.pushTable(languageTags);
+ // parse style section
+ while (!finished && lex.isOK() && !error) {
+ int le = lex.lex();
+ // See comment in LyXRC.cpp.
+ switch (le) {
+ case Lexer::LEX_FEOF:
+ continue;
+
+ case Lexer::LEX_UNDEF: // parse error
+ lex.printError("Unknown language tag `$$Token'");
+ error = true;
+ continue;
+
+ default:
+ break;
+ }
+ switch (static_cast<LanguageTags>(le)) {
+ case LA_END: // end of structure
+ finished = true;
+ break;
+ case LA_BABELNAME:
+ lex >> babel_;
+ break;
+ case LA_POLYGLOSSIANAME:
+ lex >> polyglossia_name_;
+ break;
+ case LA_POLYGLOSSIAOPTS:
+ lex >> polyglossia_opts_;
+ break;
+ case LA_XINDYNAME:
+ lex >> xindy_;
+ break;
+ case LA_QUOTESTYLE:
+ lex >> quote_style_;
+ break;
+ case LA_ACTIVECHARS:
+ lex >> active_chars_;
+ break;
+ case LA_ENCODING:
+ lex >> encodingStr_;
+ break;
+ case LA_FONTENC: {
+ lex.eatLine();
+ vector<string> const fe =
+ getVectorFromString(lex.getString(true), "|");
+ fontenc_.insert(fontenc_.end(), fe.begin(), fe.end());
+ break;
+ }
+ case LA_DATEFORMATS: {
+ lex.eatLine();
+ vector<string> const df =
+ getVectorFromString(trim(lex.getString(true), "\""), "|");
+ dateformats_.insert(dateformats_.end(), df.begin(), df.end());
+ break;
+ }
+ case LA_GUINAME:
+ lex >> display_;
+ break;
+ case LA_HAS_GUI_SUPPORT:
+ lex >> has_gui_support_;
+ break;
+ case LA_INTERNAL_ENC:
+ lex >> internal_enc_;
+ break;
+ case LA_LANG_CODE:
+ lex >> code_;
+ break;
+ case LA_LANG_VARIETY:
+ lex >> variety_;
+ break;
+ case LA_POSTBABELPREAMBLE:
+ babel_postsettings_ =
+ lex.getLongString(from_ascii("EndPostBabelPreamble"));
+ break;
+ case LA_PREBABELPREAMBLE:
+ babel_presettings_ =
+ lex.getLongString(from_ascii("EndPreBabelPreamble"));
+ break;
+ case LA_REQUIRES:
+ lex >> required_;
+ break;
+ case LA_PROVIDES:
+ lex >> provides_;
+ break;
+ case LA_RTL:
+ lex >> rightToLeft_;
+ break;
+ case LA_WORDWRAP:
+ lex >> word_wrap_;
+ break;
+ }
+ }
+ lex.popTable();
+ return finished && !error;
+}
+
+
+// Read one complete language definition from the lexer.
+// Resets per-language state, reads the language name token, then hands
+// the remaining tag/value pairs to readLanguage().  Returns true only
+// when the whole definition parsed cleanly.
+bool Language::read(Lexer & lex)
+{
+	// Reset state possibly left over from a previously read language.
+	encoding_ = nullptr;
+	internal_enc_ = false;
+	rightToLeft_ = false;
+
+	// The first token after the opening keyword is the language name.
+	if (!lex.next()) {
+		lex.printError("No name given for language: `$$Token'.");
+		return false;
+	}
+
+	lang_ = lex.getString();
+	LYXERR(Debug::INFO, "Reading language " << lang_);
+	// Parse the tag/value body; bail out on the first parse error.
+	if (!readLanguage(lex)) {
+		LYXERR0("Error parsing language `" << lang_ << '\'');
+		return false;
+	}
+
+	// Resolve the textual encoding name; if it is set but unknown,
+	// log it and fall back to iso8859-1 rather than failing outright.
+	encoding_ = encodings.fromLyXName(encodingStr_);
+	if (!encoding_ && !encodingStr_.empty()) {
+		encoding_ = encodings.fromLyXName("iso8859-1");
+		LYXERR0("Unknown encoding " << encodingStr_);
+	}
+	// Supply defaults for tags the definition omitted.
+	if (fontenc_.empty())
+		fontenc_.push_back("ASCII");
+	if (dateformats_.empty()) {
+		dateformats_.push_back("MMMM dd, yyyy");
+		dateformats_.push_back("MMM dd, yyyy");
+		dateformats_.push_back("M/d/yyyy");
+	}
+	return true;
+}
+
+
+// Merge the given translations into this language's layout-translation
+// map.  New keys are always inserted; keys already present are only
+// overwritten when 'replace' is true.
+void Language::readLayoutTranslations(Language::TranslationMap const & trans, bool replace)
+{
+	for (auto const & t : trans) {
+		// t.first is the key, t.second the translated value.
+		if (replace
+		    || layoutTranslations_.find(t.first) == layoutTranslations_.end())
+			layoutTranslations_[t.first] = t.second;
+	}
+}
+
+
+void Languages::read(FileName const & filename)
+{
+ Lexer lex;