char const * const known_coded_ref_commands[] = { "ref", "pageref", "vref",
"vpageref", "formatted", "eqref", 0 };
+/// reference commands provided by the refstyle package
+char const * const known_refstyle_commands[] = { "algref", "chapref", "corref",
+ "eqref", "enuref", "figref", "fnref", "lemref", "parref", "partref", "propref",
+ "secref", "subref", "tabref", "thmref", 0 };
+
+/// label prefixes used by the refstyle commands above
+/// (please keep this in sync with known_refstyle_commands line by line,
+/// since the two arrays are indexed in parallel!)
+char const * const known_refstyle_prefixes[] = { "alg", "chap", "cor",
+ "eq", "enu", "fig", "fn", "lem", "par", "part", "prop",
+ "sec", "sub", "tab", "thm", 0 };
+
+
/**
* supported CJK encodings
- * SJIS and Bg5 cannot be supported as they are not
- * supported by iconv
* JIS does not work with LyX's encoding conversion
*/
const char * const supported_CJK_encodings[] = {
-"EUC-JP", "KS", "GB", "UTF8", 0};
+"EUC-JP", "KS", "GB", "UTF8",
+"Bg5", /*"JIS",*/ "SJIS", 0};
/**
* the same as supported_CJK_encodings with their corresponding LyX language name
+ * FIXME: The mapping "UTF8" => "chinese-traditional" is only correct for files
+ * created by LyX.
+ * NOTE: "Bg5", "JIS" and "SJIS" are not supported by LyX, on re-export the
+ * encodings "UTF8", "EUC-JP" and "EUC-JP" will be used.
* please keep this in sync with supported_CJK_encodings line by line!
*/
const char * const supported_CJK_languages[] = {
-"japanese-cjk", "korean", "chinese-simplified", "chinese-traditional", 0};
+"japanese-cjk", "korean", "chinese-simplified", "chinese-traditional",
+"chinese-traditional", /*"japanese-cjk",*/ "japanese-cjk", 0};
/*!
* natbib commands.
char const * const known_coded_phrases[] = {"LyX", "TeX", "LaTeX2e", "LaTeX", 0};
int const known_phrase_lengths[] = {3, 5, 7, 0};
+/// known TIPA combining diacritical marks
+char const * const known_tipa_marks[] = {"textsubwedge", "textsubumlaut",
+"textsubtilde", "textseagull", "textsubbridge", "textinvsubbridge",
+"textsubsquare", "textsubrhalfring", "textsublhalfring", "textsubplus",
+"textovercross", "textsubarch", "textsuperimposetilde", "textraising",
+"textlowering", "textadvancing", "textretracting", "textdoublegrave",
+"texthighrise", "textlowrise", "textrisefall", "textsyllabic",
+"textsubring", 0};
+
+/// TIPA tones that need special handling
+char const * const known_tones[] = {"15", "51", "45", "12", "454", 0};
+
// string to store the float type to be able to determine the type of subfloats
string float_type = "";
}
-void handle_backslash(ostream & os, string const & s)
-{
- for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
- if (*it == '\\')
- os << "\n\\backslash\n";
- else
- os << *it;
- }
-}
-
-
-void handle_ert(ostream & os, string const & s, Context & context)
+// Write the LaTeX code in s to the LyX stream os in the given context:
+// each backslash is output as the \backslash macro and each newline
+// starts a new paragraph. Unlike output_ert_inset() below, this does
+// not open an ERT inset around the output.
+void output_ert(ostream & os, string const & s, Context & context)
{
- // We must have a valid layout before outputting the ERT inset.
context.check_layout(os);
- Context newcontext(true, context.textclass);
- begin_inset(os, "ERT");
- os << "\nstatus collapsed\n";
- newcontext.check_layout(os);
for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
if (*it == '\\')
os << "\n\\backslash\n";
else if (*it == '\n') {
- newcontext.new_paragraph(os);
- newcontext.check_layout(os);
+ context.new_paragraph(os);
+ context.check_layout(os);
} else
os << *it;
}
- newcontext.check_end_layout(os);
- end_inset(os);
+ context.check_end_layout(os);
}
-void handle_comment(ostream & os, string const & s, Context & context)
+// Wrap the LaTeX code in s in a collapsed ERT inset and write it to the
+// LyX stream os. The inset content itself is produced by output_ert();
+// if the ERT inset layout forces the plain layout, the nested context
+// is switched to the plain layout first.
+void output_ert_inset(ostream & os, string const & s, Context & context)
{
- // TODO: Handle this better
+ // We must have a valid layout before outputting the ERT inset.
+ context.check_layout(os);
Context newcontext(true, context.textclass);
+ InsetLayout const & layout = context.textclass.insetLayout(from_ascii("ERT"));
+ if (layout.forcePlainLayout())
+ newcontext.layout = &context.textclass.plainLayout();
begin_inset(os, "ERT");
os << "\nstatus collapsed\n";
- newcontext.check_layout(os);
- handle_backslash(os, s);
- // make sure that our comment is the last thing on the line
- newcontext.new_paragraph(os);
- newcontext.check_layout(os);
- newcontext.check_end_layout(os);
+ output_ert(os, s, newcontext);
end_inset(os);
}
case required:
case req_group:
// This argument contains regular LaTeX
- handle_ert(os, ert + '{', context);
+ output_ert_inset(os, ert + '{', context);
eat_whitespace(p, os, context, false);
if (template_arguments[i] == required)
parse_text(p, os, FLAG_ITEM, outer, context);
break;
}
}
- handle_ert(os, ert, context);
+ output_ert_inset(os, ert, context);
}
// If yes, we need to output ERT.
p.pushPosition();
if (inner_flags & FLAG_END)
- p.verbatimEnvironment(inner_type);
+ p.ertEnvironment(inner_type);
else
p.verbatim_item();
p.skip_spaces(true);
}
if (inner_type == "shaded")
ss << "\\begin{shaded}";
- handle_ert(os, ss.str(), parent_context);
+ output_ert_inset(os, ss.str(), parent_context);
if (!inner_type.empty()) {
parse_text(p, os, inner_flags, outer, parent_context);
if (inner_flags & FLAG_END)
- handle_ert(os, "\\end{" + inner_type + '}',
+ output_ert_inset(os, "\\end{" + inner_type + '}',
parent_context);
else
- handle_ert(os, "}", parent_context);
+ output_ert_inset(os, "}", parent_context);
}
if (!outer_type.empty()) {
// If we already read the inner box we have to pop
}
parse_text(p, os, outer_flags, outer, parent_context);
if (outer_flags & FLAG_END)
- handle_ert(os, "\\end{" + outer_type + '}',
+ output_ert_inset(os, "\\end{" + outer_type + '}',
parent_context);
else if (inner_type.empty() && outer_type == "framebox")
// in this case it is already closed later
;
else
- handle_ert(os, "}", parent_context);
+ output_ert_inset(os, "}", parent_context);
}
} else {
// LyX does not like empty positions, so we have
// LyX puts a % after the end of the minipage
if (p.next_token().cat() == catNewline && p.next_token().cs().size() > 1) {
// new paragraph
- //handle_comment(os, "%dummy", parent_context);
+ //output_ert_inset(os, "%dummy", parent_context);
p.get_token();
p.skip_spaces();
parent_context.new_paragraph(os);
}
else if (p.next_token().cat() == catSpace || p.next_token().cat() == catNewline) {
- //handle_comment(os, "%dummy", parent_context);
+ //output_ert_inset(os, "%dummy", parent_context);
p.get_token();
p.skip_spaces();
// We add a protected space if something real follows
context.layout = &parent_context.textclass.plainLayout();
string s;
if (in_line) {
- s = p.plainCommand('!', '!', "lstinline");
- context.new_paragraph(os);
- context.check_layout(os);
+ // set catcodes to verbatim early, just in case.
+ p.setCatcodes(VERBATIM_CATCODES);
+ string delim = p.get_token().asInput();
+ s = p.verbatimStuff(delim);
+// context.new_paragraph(os);
} else
- s = p.plainEnvironment("lstlisting");
- for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
- if (*it == '\\')
- os << "\n\\backslash\n";
- else if (*it == '\n') {
- // avoid adding an empty paragraph at the end
- if (it + 1 != et) {
- context.new_paragraph(os);
- context.check_layout(os);
- }
- } else
- os << *it;
- }
- context.check_end_layout(os);
+ s = p.verbatimEnvironment("lstlisting");
+ output_ert(os, s, context);
end_inset(os);
}
bool const new_layout_allowed = parent_context.new_layout_allowed;
if (specialfont)
parent_context.new_layout_allowed = false;
- handle_ert(os, "\\begin{" + name + "}", parent_context);
+ output_ert_inset(os, "\\begin{" + name + "}", parent_context);
parse_text_snippet(p, os, flags, outer, parent_context);
- handle_ert(os, "\\end{" + name + "}", parent_context);
+ output_ert_inset(os, "\\end{" + name + "}", parent_context);
if (specialfont)
parent_context.new_layout_allowed = new_layout_allowed;
}
}
else if (name == "verbatim") {
- os << "\n\\end_layout\n\n\\begin_layout Verbatim\n";
- string const s = p.plainEnvironment("verbatim");
- string::const_iterator it2 = s.begin();
- for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
- if (*it == '\\')
- os << "\\backslash ";
- else if (*it == '\n') {
- it2 = it + 1;
- // avoid adding an empty paragraph at the end
- // FIXME: if there are 2 consecutive spaces at the end ignore it
- // because LyX will re-add a \n
- // This hack must be removed once bug 8049 is fixed!
- if ((it + 1 != et) && (it + 2 != et || *it2 != '\n'))
- os << "\n\\end_layout\n\\begin_layout Verbatim\n";
- } else
- os << *it;
- }
- os << "\n\\end_layout\n\n";
+ // FIXME: this should go in the generic code that
+ // handles environments defined in layout file that
+ // have "PassThru 1". However, the code over there is
+ // already too complicated for my taste.
+ parent_context.new_paragraph(os);
+ Context context(true, parent_context.textclass,
+ &parent_context.textclass[from_ascii("Verbatim")]);
+ string s = p.verbatimEnvironment("verbatim");
+ output_ert(os, s, context);
p.skip_spaces();
- // reset to Standard layout
- os << "\n\\begin_layout Standard\n";
+ }
+
+ else if (name == "IPA") {
+ eat_whitespace(p, os, parent_context, false);
+ parent_context.check_layout(os);
+ begin_inset(os, "IPA\n");
+ parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
+ end_inset(os);
+ p.skip_spaces();
+ preamble.registerAutomaticallyLoadedPackage("tipa");
+ preamble.registerAutomaticallyLoadedPackage("tipx");
}
else if (name == "CJK") {
- // the scheme is \begin{CJK}{encoding}{mapping}{text}
+ // the scheme is \begin{CJK}{encoding}{mapping}text\end{CJK}
// It is impossible to decide if a CJK environment was in its own paragraph or within
// a line. We therefore always assume a paragraph since the latter is a rare case.
eat_whitespace(p, os, parent_context, false);
// store the encoding to be able to reset it
string const encoding_old = p.getEncoding();
string const encoding = p.getArg('{', '}');
- // SJIS and Bg5 cannot be handled by iconv
- // JIS does not work with LyX's encoding conversion
- if (encoding != "Bg5" && encoding != "JIS" && encoding != "SJIS")
- p.setEncoding(encoding);
- else
- p.setEncoding("utf8");
- // LyX doesn't support the second argument so if
- // this is used we need to output everything as ERT
- string const mapping = p.getArg('{', '}');
+ // FIXME: For some reason JIS does not work. Although the text
+ // in tests/CJK.tex is identical with the SJIS version if you
+ // convert both snippets using the recode command line utility,
+ // the resulting .lyx file contains some extra characters if
+ // you set buggy_encoding to false for JIS.
+ bool const buggy_encoding = encoding == "JIS";
+ if (!buggy_encoding)
+ p.setEncoding(encoding, Encoding::CJK);
+ else {
+ // FIXME: This will read garbage, since the data is not encoded in utf8.
+ p.setEncoding("UTF-8");
+ }
+ // LyX only supports the same mapping for all CJK
+ // environments, so we might need to output everything as ERT
+ string const mapping = trim(p.getArg('{', '}'));
char const * const * const where =
is_known(encoding, supported_CJK_encodings);
- if ((!mapping.empty() && mapping != " ") || !where) {
+ if (!buggy_encoding && !preamble.fontCJKSet())
+ preamble.fontCJK(mapping);
+ bool knownMapping = mapping == preamble.fontCJK();
+ if (buggy_encoding || !knownMapping || !where) {
parent_context.check_layout(os);
- handle_ert(os, "\\begin{" + name + "}{" + encoding + "}{" + mapping + "}",
+ output_ert_inset(os, "\\begin{" + name + "}{" + encoding + "}{" + mapping + "}",
parent_context);
// we must parse the content as verbatim because e.g. JIS can contain
// normally invalid characters
+ // FIXME: This works only for the most simple cases.
+ // Since TeX control characters are not parsed,
+ // things like comments are completely wrong.
string const s = p.plainEnvironment("CJK");
for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
if (*it == '\\')
- handle_ert(os, "\\", parent_context);
+ output_ert_inset(os, "\\", parent_context);
else if (*it == '$')
- handle_ert(os, "$", parent_context);
+ output_ert_inset(os, "$", parent_context);
+ else if (*it == '\n' && it + 1 != et && s.begin() + 1 != it)
+ os << "\n ";
else
os << *it;
}
- handle_ert(os, "\\end{" + name + "}",
+ output_ert_inset(os, "\\end{" + name + "}",
parent_context);
} else {
string const lang =
begin_inset(os, "Flex ");
os << to_utf8(newinsetlayout->name()) << '\n'
<< "status collapsed\n";
- parse_text_in_inset(p, os, FLAG_END, false, parent_context, newinsetlayout);
+ if (newinsetlayout->isPassThru()) {
+ string const arg = p.verbatimEnvironment(name);
+ Context context(true, parent_context.textclass,
+ &parent_context.textclass.plainLayout(),
+ parent_context.layout);
+ output_ert(os, arg, parent_context);
+ } else
+ parse_text_in_inset(p, os, FLAG_END, false, parent_context, newinsetlayout);
end_inset(os);
}
parse_arguments("\\begin{" + name + "}", arguments, p, os,
outer, parent_context);
if (contents == verbatim)
- handle_ert(os, p.verbatimEnvironment(name),
+ output_ert_inset(os, p.ertEnvironment(name),
parent_context);
else
parse_text_snippet(p, os, FLAG_END, outer,
parent_context);
- handle_ert(os, "\\end{" + name + "}", parent_context);
+ output_ert_inset(os, "\\end{" + name + "}", parent_context);
if (specialfont)
parent_context.new_layout_allowed = new_layout_allowed;
}
LASSERT(t.cat() == catComment, return);
if (!t.cs().empty()) {
context.check_layout(os);
- handle_comment(os, '%' + t.cs(), context);
+ output_ert_inset(os, '%' + t.cs(), context);
if (p.next_token().cat() == catNewline) {
// A newline after a comment line starts a new
// paragraph
// done (we might get called recursively)
context.new_paragraph(os);
} else
- handle_ert(os, "\n", context);
+ output_ert_inset(os, "\n", context);
eat_whitespace(p, os, context, true);
}
} else {
/// can understand
string const normalize_filename(string const & name)
{
- Parser p(trim(name, "\""));
+ Parser p(name);
ostringstream os;
while (p.good()) {
Token const & t = p.get_token();
} else if (t.cs() == "space") {
os << ' ';
p.skip_spaces();
+ } else if (t.cs() == "string") {
+ // Convert \string" to " and \string~ to ~
+ Token const & n = p.next_token();
+ if (n.asInput() != "\"" && n.asInput() != "~")
+ os << t.asInput();
} else
os << t.asInput();
}
- return os.str();
+ // Strip quotes. This is a bit complicated (see latex_path()).
+ string full = os.str();
+ if (!full.empty() && full[0] == '"') {
+ string base = removeExtension(full);
+ string ext = getExtension(full);
+ if (!base.empty() && base[base.length()-1] == '"')
+ // "a b"
+ // "a b".tex
+ return addExtension(trim(base, "\""), ext);
+ if (full[full.length()-1] == '"')
+ // "a b.c"
+ // "a b.c".tex
+ return trim(full, "\"");
+ }
+ return full;
}
// followed by number?
if (p.next_token().cat() == catOther) {
- char c = p.getChar();
- paramtext += c;
+ string s = p.get_token().asInput();
+ paramtext += s;
// number = current arity + 1?
- if (c == arity + '0' + 1)
+ if (s.size() == 1 && s[0] == arity + '0' + 1)
++arity;
else
simple = false;
os << "\n\\def" << ert;
end_inset(os);
} else
- handle_ert(os, command + ert, context);
+ output_ert_inset(os, command + ert, context);
}
Layout const * newlayout = 0;
InsetLayout const * newinsetlayout = 0;
char const * const * where = 0;
- // Store the latest bibliographystyle and nocite{*} option
- // (needed for bibtex inset)
+ // Store the latest bibliographystyle, addcontentslineContent and
+ // nocite{*} option (needed for bibtex inset)
string btprint;
+ string contentslineContent;
string bibliographystyle = "default";
- bool const use_natbib = preamble.isPackageUsed("natbib");
- bool const use_jurabib = preamble.isPackageUsed("jurabib");
+ bool const use_natbib = isProvided("natbib");
+ bool const use_jurabib = isProvided("jurabib");
string last_env;
while (p.good()) {
Token const & t = p.get_token();
<< "Important information:\n"
<< "\\end_layout\n\n"
<< "\\begin_layout Plain Layout\n"
- << "This document is in Japanese (non-CJK).\n"
- << " It was therefore impossible for tex2lyx to determine the correct encoding."
- << " The encoding EUC-JP was assumed. If this is incorrect, please set the correct"
- << " encoding in the document settings.\n"
+ << "The original LaTeX source for this document is in Japanese (pLaTeX).\n"
+ << " It was therefore impossible for tex2lyx to determine the correct encoding.\n"
+ << " The iconv encoding " << p.getEncoding() << " was used.\n"
+ << " If this is incorrect, you must run the tex2lyx program on the command line\n"
+ << " and specify the encoding using the -e command-line switch.\n"
+ << " In addition, you might want to double check that the desired output encoding\n"
+ << " is correctly selected in Document > Settings > Language.\n"
<< "\\end_layout\n";
end_inset(os);
is_nonCJKJapanese = false;
else
cerr << "Warning: Inserting missing ']' in '"
<< s << "'." << endl;
- handle_ert(os, s, context);
+ output_ert_inset(os, s, context);
}
else if (t.cat() == catLetter) {
for (int i = 1; i < *l && p.next_token().isAlnumASCII(); ++i)
phrase += p.get_token().cs();
if (is_known(phrase, known_coded_phrases)) {
- handle_ert(os, phrase, context);
+ output_ert_inset(os, phrase, context);
handled = true;
break;
} else {
if (context.new_layout_allowed)
context.new_paragraph(os);
else
- handle_ert(os, "\\par ", context);
+ output_ert_inset(os, "\\par ", context);
eat_whitespace(p, os, context, true);
}
p.next_token().character() == '-'))
; // ignore it in {}`` or -{}-
else
- handle_ert(os, "{}", context);
+ output_ert_inset(os, "{}", context);
} else if (next.cat() == catEscape &&
is_known(next.cs(), known_quotes) &&
end.cat() == catEnd) {
p.get_token();
} else {
p.putback();
- handle_ert(os, "{", context);
+ output_ert_inset(os, "{", context);
parse_text_snippet(p, os,
FLAG_BRACE_LAST,
outer, context);
- handle_ert(os, "}", context);
+ output_ert_inset(os, "}", context);
}
} else if (! context.new_layout_allowed) {
- handle_ert(os, "{", context);
+ output_ert_inset(os, "{", context);
parse_text_snippet(p, os, FLAG_BRACE_LAST,
outer, context);
- handle_ert(os, "}", context);
+ output_ert_inset(os, "}", context);
} else if (is_known(next.cs(), known_sizes)) {
// next will change the size, so we must
// reset it here
<< "\n\\shape "
<< context.font.shape << "\n";
} else {
- handle_ert(os, "{", context);
+ output_ert_inset(os, "{", context);
parse_text_snippet(p, os, FLAG_BRACE_LAST,
outer, context);
- handle_ert(os, "}", context);
+ output_ert_inset(os, "}", context);
}
}
}
return;
}
cerr << "stray '}' in text\n";
- handle_ert(os, "}", context);
+ output_ert_inset(os, "}", context);
}
else if (t.cat() == catComment)
// FIXME: Do this in check_layout()!
context.has_item = false;
if (optarg)
- handle_ert(os, "\\item", context);
+ output_ert_inset(os, "\\item", context);
else
- handle_ert(os, "\\item ", context);
+ output_ert_inset(os, "\\item ", context);
}
if (optarg) {
if (context.layout->labeltype != LABEL_MANUAL) {
// would misinterpret the space as
// item delimiter (bug 7663)
if (contains(s, ' ')) {
- handle_ert(os, s, context);
+ output_ert_inset(os, s, context);
} else {
Parser p2(s + ']');
os << parse_text_snippet(p2,
string key = convert_command_inset_arg(p.verbatim_item());
if (contains(label, '\\') || contains(key, '\\')) {
// LyX can't handle LaTeX commands in labels or keys
- handle_ert(os, t.asInput() + '[' + label +
+ output_ert_inset(os, t.asInput() + '[' + label +
"]{" + p.verbatim_item() + '}',
context);
} else {
context.check_layout(os);
// FIXME: This is a hack to prevent paragraph
// deletion if it is empty. Handle this better!
- handle_comment(os,
+ output_ert_inset(os,
"%dummy comment inserted by tex2lyx to "
"ensure that this paragraph is not empty",
context);
for (; it != en; ++it)
preamble.registerAutomaticallyLoadedPackage(*it);
} else
- handle_ert(os,
+ output_ert_inset(os,
"\\date{" + p.verbatim_item() + '}',
context);
}
p.skip_spaces();
context.check_layout(os);
p.skip_spaces();
- begin_inset(os, "Caption\n");
+ begin_inset(os, "Caption Standard\n");
Context newcontext(true, context.textclass, 0, 0, context.font);
newcontext.check_layout(os);
// FIXME InsetArgument is now properly implemented in InsetLayout
// we must make sure that the caption gets a \begin_layout
os << "\n\\begin_layout Plain Layout";
p.skip_spaces();
- begin_inset(os, "Caption\n");
+ begin_inset(os, "Caption Standard\n");
Context newcontext(true, context.textclass,
0, 0, context.font);
newcontext.check_layout(os);
// output it as ERT
if (p.hasOpt()) {
string opt_arg = convert_command_inset_arg(p.getArg('[', ']'));
- handle_ert(os, t.asInput() + '[' + opt_arg +
+ output_ert_inset(os, t.asInput() + '[' + opt_arg +
"]{" + p.verbatim_item() + '}', context);
} else
- handle_ert(os, t.asInput() + "{" + p.verbatim_item() + '}', context);
+ output_ert_inset(os, t.asInput() + "{" + p.verbatim_item() + '}', context);
}
}
if (s == "\xb1" || s == "\xb3" || s == "\xb2" || s == "\xb5")
os << s;
else
- handle_ert(os, "\\ensuremath{" + s + "}",
+ output_ert_inset(os, "\\ensuremath{" + s + "}",
context);
}
// swallow this
skip_spaces_braces(p);
} else
- handle_ert(os, t.asInput(), context);
+ output_ert_inset(os, t.asInput(), context);
}
else if (t.cs() == "tableofcontents" || t.cs() == "lstlistoflistings") {
end_inset(os);
p.get_token(); // swallow second arg
} else
- handle_ert(os, "\\listof{" + name + "}", context);
+ output_ert_inset(os, "\\listof{" + name + "}", context);
}
else if ((where = is_known(t.cs(), known_text_font_families)))
preamble.registerAutomaticallyLoadedPackage("color");
} else
// for custom defined colors
- handle_ert(os, t.asInput() + "{" + color + "}", context);
+ output_ert_inset(os, t.asInput() + "{" + color + "}", context);
}
else if (t.cs() == "underbar" || t.cs() == "uline") {
string localtime = p.getArg('{', '}');
preamble.registerAuthor(name);
Author const & author = preamble.getAuthor(name);
- // from_ctime() will fail if LyX decides to output the
- // time in the text language. It might also use a wrong
- // time zone (if the original LyX document was exported
- // with a different time zone).
- time_t ptime = from_ctime(localtime);
+ // from_asctime_utc() will fail if LyX decides to output the
+ // time in the text language.
+ time_t ptime = from_asctime_utc(localtime);
if (ptime == static_cast<time_t>(-1)) {
cerr << "Warning: Could not parse time `" << localtime
<< "´ for change tracking, using current time instead.\n";
}
}
+ else if (t.cs() == "textipa") {
+ context.check_layout(os);
+ begin_inset(os, "IPA\n");
+ parse_text_in_inset(p, os, FLAG_ITEM, outer, context);
+ end_inset(os);
+ preamble.registerAutomaticallyLoadedPackage("tipa");
+ preamble.registerAutomaticallyLoadedPackage("tipx");
+ }
+
+ else if (t.cs() == "texttoptiebar" || t.cs() == "textbottomtiebar") {
+ context.check_layout(os);
+ begin_inset(os, "IPADeco " + t.cs().substr(4) + "\n");
+ os << "status open\n";
+ parse_text_in_inset(p, os, FLAG_ITEM, outer, context);
+ end_inset(os);
+ p.skip_spaces();
+ }
+
+ // the TIPA Combining diacritical marks
+ else if (is_known(t.cs(), known_tipa_marks) || t.cs() == "textvertline") {
+ preamble.registerAutomaticallyLoadedPackage("tipa");
+ preamble.registerAutomaticallyLoadedPackage("tipx");
+ context.check_layout(os);
+ if (t.cs() == "textvertline") {
+ os << "|";
+ skip_braces(p);
+ continue;
+ }
+ // try to see whether the string is in unicodesymbols
+ bool termination;
+ docstring rem;
+ string content = trimSpaceAndEol(p.verbatim_item());
+ string command = t.asInput() + "{" + content + "}";
+ set<string> req;
+ docstring s = encodings.fromLaTeXCommand(from_utf8(command),
+ Encodings::TEXT_CMD | Encodings::MATH_CMD,
+ termination, rem, &req);
+ if (!s.empty()) {
+ if (!rem.empty())
+ cerr << "When parsing " << command
+ << ", result is " << to_utf8(s)
+ << "+" << to_utf8(rem) << endl;
+ os << content << to_utf8(s);
+ } else
+ // we did not find a non-ert version
+ output_ert_inset(os, command, context);
+ }
+
+ else if (t.cs() == "tone" ) {
+ context.check_layout(os);
+ // register the tone package
+ preamble.registerAutomaticallyLoadedPackage("tone");
+ string content = trimSpaceAndEol(p.verbatim_item());
+ string command = t.asInput() + "{" + content + "}";
+ // some tones can be detected by unicodesymbols, some need special code
+ if (is_known(content, known_tones)) {
+ os << "\\IPAChar " << command << "\n";
+ continue;
+ }
+ // try to see whether the string is in unicodesymbols
+ bool termination;
+ docstring rem;
+ set<string> req;
+ docstring s = encodings.fromLaTeXCommand(from_utf8(command),
+ Encodings::TEXT_CMD | Encodings::MATH_CMD,
+ termination, rem, &req);
+ if (!s.empty()) {
+ if (!rem.empty())
+ cerr << "When parsing " << command
+ << ", result is " << to_utf8(s)
+ << "+" << to_utf8(rem) << endl;
+ os << to_utf8(s);
+ } else
+ // we did not find a non-ert version
+ output_ert_inset(os, command, context);
+ }
+
else if (t.cs() == "phantom" || t.cs() == "hphantom" ||
t.cs() == "vphantom") {
context.check_layout(os);
}
else if ((where = is_known(t.cs(), known_ref_commands))) {
- string const opt = p.getOpt();
- if (opt.empty()) {
+ // \eqref can also occur if refstyle is used
+ if (t.cs() == "eqref" && preamble.refstyle() == "1") {
context.check_layout(os);
- begin_command_inset(os, "ref",
- known_coded_ref_commands[where - known_ref_commands]);
- os << "reference \""
+ begin_command_inset(os, "ref", "formatted");
+ os << "reference \"eq:"
<< convert_command_inset_arg(p.verbatim_item())
<< "\"\n";
end_inset(os);
- if (t.cs() == "vref" || t.cs() == "vpageref")
- preamble.registerAutomaticallyLoadedPackage("varioref");
-
+ preamble.registerAutomaticallyLoadedPackage("refstyle");
} else {
- // LyX does not support optional arguments of ref commands
- handle_ert(os, t.asInput() + '[' + opt + "]{" +
+ string const opt = p.getOpt();
+ if (opt.empty()) {
+ context.check_layout(os);
+ begin_command_inset(os, "ref",
+ known_coded_ref_commands[where - known_ref_commands]);
+ os << "reference \""
+ << convert_command_inset_arg(p.verbatim_item())
+ << "\"\n";
+ end_inset(os);
+ if (t.cs() == "vref" || t.cs() == "vpageref")
+ preamble.registerAutomaticallyLoadedPackage("varioref");
+ } else {
+ // LyX does not yet support optional arguments of ref commands
+ output_ert_inset(os, t.asInput() + '[' + opt + "]{" +
p.verbatim_item() + "}", context);
+ }
}
}
+ else if ((where = is_known(t.cs(), known_refstyle_commands))) {
+ context.check_layout(os);
+ // \eqref can also occur if refstyle is not used
+ // this case is already handled in the previous else if
+ begin_command_inset(os, "ref", "formatted");
+ os << "reference \"";
+ os << known_refstyle_prefixes[where - known_refstyle_commands]
+ << ":";
+ os << convert_command_inset_arg(p.verbatim_item())
+ << "\"\n";
+ end_inset(os);
+ preamble.registerAutomaticallyLoadedPackage("refstyle");
+ }
+
else if (use_natbib &&
is_known(t.cs(), known_natbib_commands) &&
((t.cs() != "citefullauthor" &&
<< convert_command_inset_arg(p.verbatim_item())
<< "\"\n";
end_inset(os);
+ // Need to set the cite engine if natbib is loaded by
+ // the document class directly
+ if (preamble.citeEngine() == "basic")
+ preamble.citeEngine("natbib");
}
else if (use_jurabib &&
os << "before " << '"' << before << '"' << "\n";
os << "key " << '"' << citation << '"' << "\n";
end_inset(os);
+ // Need to set the cite engine if jurabib is loaded by
+ // the document class directly
+ if (preamble.citeEngine() == "basic")
+ preamble.citeEngine("jurabib");
}
else if (t.cs() == "cite"
end_inset(os);
}
- else if (t.cs() == "printindex") {
+ else if (t.cs() == "printindex" || t.cs() == "printsubindex") {
context.check_layout(os);
- begin_command_inset(os, "index_print", "printindex");
- os << "type \"idx\"\n";
+ string commandname = t.cs();
+ bool star = false;
+ if (p.next_token().asInput() == "*") {
+ commandname += "*";
+ star = true;
+ p.get_token();
+ }
+ begin_command_inset(os, "index_print", commandname);
+ string const indexname = p.getArg('[', ']');
+ if (!star) {
+ if (indexname.empty())
+ os << "type \"idx\"\n";
+ else
+ os << "type \"" << indexname << "\"\n";
+ }
end_inset(os);
skip_spaces_braces(p);
preamble.registerAutomaticallyLoadedPackage("makeidx");
context, "\\lang",
context.font.language, lang);
} else
- handle_ert(os, t.asInput() + langopts, context);
+ output_ert_inset(os, t.asInput() + langopts, context);
} else {
lang = preamble.polyglossia2lyx(t.cs().substr(4, string::npos));
parse_text_attributes(p, os, FLAG_ITEM, outer,
else if (t.cs() == "inputencoding") {
// nothing to write here
string const enc = subst(p.verbatim_item(), "\n", " ");
- p.setEncoding(enc);
+ p.setEncoding(enc, Encoding::inputenc);
}
else if ((where = is_known(t.cs(), known_special_chars))) {
os << '"';
skip_braces(p);
} else {
- handle_ert(os, "\\char`", context);
+ output_ert_inset(os, "\\char`", context);
}
} else {
- handle_ert(os, "\\char", context);
+ output_ert_inset(os, "\\char", context);
}
}
else if (t.cs() == "verb") {
context.check_layout(os);
- char const delimiter = p.next_token().character();
- // \verb is special: The usual escaping rules do not
- // apply, e.g. "\verb+\+" is valid and denotes a single
- // backslash (bug #4468). Therefore we do not allow
- // escaping in getArg().
- string const arg = p.getArg(delimiter, delimiter, false);
- ostringstream oss;
- oss << "\\verb" << delimiter << arg << delimiter;
- handle_ert(os, oss.str(), context);
+ // set catcodes to verbatim early, just in case.
+ p.setCatcodes(VERBATIM_CATCODES);
+ string delim = p.get_token().asInput();
+ string const arg = p.verbatimStuff(delim);
+ output_ert_inset(os, "\\verb" + delim + arg + delim, context);
}
// Problem: \= creates a tabstop inside the tabbing environment
// and else an accent. In the latter case we really would want
// \={o} instead of \= o.
else if (t.cs() == "=" && (flags & FLAG_TABBING))
- handle_ert(os, t.asInput(), context);
+ output_ert_inset(os, t.asInput(), context);
// accents (see Table 6 in Comprehensive LaTeX Symbol List)
else if (t.cs().size() == 1
preamble.registerAutomaticallyLoadedPackage(*it);
} else
// we did not find a non-ert version
- handle_ert(os, command, context);
+ output_ert_inset(os, command, context);
}
else if (t.cs() == "\\") {
context.check_layout(os);
if (p.hasOpt())
- handle_ert(os, "\\\\" + p.getOpt(), context);
+ output_ert_inset(os, "\\\\" + p.getOpt(), context);
else if (p.next_token().asInput() == "*") {
p.get_token();
// getOpt() eats the following space if there
// is no optional argument, but that is OK
// here since it has no effect in the output.
- handle_ert(os, "\\\\*" + p.getOpt(), context);
+ output_ert_inset(os, "\\\\*" + p.getOpt(), context);
}
else {
begin_inset(os, "Newline newline");
registerExternalTemplatePackages("XFig");
} else {
begin_command_inset(os, "include", name);
+ outname = subst(outname, "\"", "\\\"");
os << "preview false\n"
"filename \"" << outname << "\"\n";
if (t.cs() == "verbatiminput")
else if (t.cs() == "bibliographystyle") {
// store new bibliographystyle
bibliographystyle = p.verbatim_item();
- // If any other command than \bibliography and
- // \nocite{*} follows, we need to output the style
+ // If any other command than \bibliography, \addcontentsline
+ // and \nocite{*} follows, we need to output the style
// (because it might be used by that command).
// Otherwise, it will automatically be output by LyX.
p.pushPosition();
continue;
} else if (t2.cs() == "bibliography")
output = false;
+ else if (t2.cs() == "phantomsection") {
+ output = false;
+ continue;
+ }
+ else if (t2.cs() == "addcontentsline") {
+ // get the 3 arguments of \addcontentsline
+ p.getArg('{', '}');
+ p.getArg('{', '}');
+ contentslineContent = p.getArg('{', '}');
+ // if the last argument is not \refname we must output
+ if (contentslineContent == "\\refname")
+ output = false;
+ }
break;
}
p.popPosition();
if (output) {
- handle_ert(os,
+ output_ert_inset(os,
"\\bibliographystyle{" + bibliographystyle + '}',
context);
}
}
+ else if (t.cs() == "phantomsection") {
+ // we only support this if it occurs between
+ // \bibliographystyle and \bibliography
+ if (bibliographystyle.empty())
+ output_ert_inset(os, "\\phantomsection", context);
+ }
+
+ else if (t.cs() == "addcontentsline") {
+ context.check_layout(os);
+ // get the 3 arguments of \addcontentsline
+ string const one = p.getArg('{', '}');
+ string const two = p.getArg('{', '}');
+ string const three = p.getArg('{', '}');
+ // only if it is a \refname, we support it for the bibtex inset
+ if (contentslineContent != "\\refname") {
+ output_ert_inset(os,
+ "\\addcontentsline{" + one + "}{" + two + "}{"+ three + '}',
+ context);
+ }
+ }
+
else if (t.cs() == "bibliography") {
context.check_layout(os);
+ string BibOpts;
begin_command_inset(os, "bibtex", "bibtex");
if (!btprint.empty()) {
os << "btprint " << '"' << "btPrintAll" << '"' << "\n";
btprint.clear();
}
os << "bibfiles " << '"' << p.verbatim_item() << '"' << "\n";
+ // Do we have addcontentsline?
+ if (contentslineContent == "\\refname") {
+ BibOpts = "bibtotoc";
+ // clear string because next BibTeX inset can be without addcontentsline
+ contentslineContent.clear();
+ }
// Do we have a bibliographystyle set?
- if (!bibliographystyle.empty())
- os << "options " << '"' << bibliographystyle << '"' << "\n";
+ if (!bibliographystyle.empty()) {
+ if (BibOpts.empty())
+ BibOpts = bibliographystyle;
+ else
+ BibOpts = BibOpts + ',' + bibliographystyle;
+ // clear it because each bibtex entry has its style
+ // and we need an empty string to handle \phantomsection
+ bibliographystyle.clear();
+ }
+ os << "options " << '"' << BibOpts << '"' << "\n";
end_inset(os);
}
arg += p.getFullParentheseArg();
arg += p.getFullOpt();
eat_whitespace(p, os, context, false);
- handle_ert(os, arg + '{', context);
+ output_ert_inset(os, arg + '{', context);
parse_text(p, os, FLAG_ITEM, outer, context);
- handle_ert(os, "}", context);
+ output_ert_inset(os, "}", context);
} else {
string special = p.getFullOpt();
special += p.getOpt();
context, t.cs(), special);
else {
eat_whitespace(p, os, context, false);
- handle_ert(os, "\\framebox{", context);
+ output_ert_inset(os, "\\framebox{", context);
parse_text(p, os, FLAG_ITEM, outer, context);
- handle_ert(os, "}", context);
+ output_ert_inset(os, "}", context);
}
}
}
arg += p.getFullParentheseArg();
arg += p.getFullOpt();
eat_whitespace(p, os, context, false);
- handle_ert(os, arg + '{', context);
+ output_ert_inset(os, arg + '{', context);
parse_text(p, os, FLAG_ITEM, outer, context);
- handle_ert(os, "}", context);
+ output_ert_inset(os, "}", context);
} else
//the syntax is: \makebox[width][position]{content}
parse_box(p, os, 0, FLAG_ITEM, outer, context,
t.cs() == "providecommand" ||
t.cs() == "providecommandx" ||
name[name.length()-1] == '*')
- handle_ert(os, ert, context);
+ output_ert_inset(os, ert, context);
else {
context.check_layout(os);
begin_inset(os, "FormulaMacro");
CommandMap::iterator it = known_commands.find(command);
if (it != known_commands.end())
known_commands[t.asInput()] = it->second;
- handle_ert(os, ert, context);
+ output_ert_inset(os, ert, context);
}
else if (t.cs() == "hspace" || t.cs() == "vspace") {
name += '*';
if (valid) {
if (value == 1.0)
- handle_ert(os, name + '{' + unit + '}', context);
+ output_ert_inset(os, name + '{' + unit + '}', context);
else if (value == -1.0)
- handle_ert(os, name + "{-" + unit + '}', context);
+ output_ert_inset(os, name + "{-" + unit + '}', context);
else
- handle_ert(os, name + '{' + valstring + unit + '}', context);
+ output_ert_inset(os, name + '{' + valstring + unit + '}', context);
} else
- handle_ert(os, name + '{' + length + '}', context);
+ output_ert_inset(os, name + '{' + length + '}', context);
}
}
begin_inset(os, "Flex ");
os << to_utf8(newinsetlayout->name()) << '\n'
<< "status collapsed\n";
- parse_text_in_inset(p, os, FLAG_ITEM, false, context, newinsetlayout);
+ if (newinsetlayout->isPassThru()) {
+ // set catcodes to verbatim early, just in case.
+ p.setCatcodes(VERBATIM_CATCODES);
+ string delim = p.get_token().asInput();
+ if (delim != "{")
+ cerr << "Warning: bad delimiter for command " << t.asInput() << endl;
+ string const arg = p.verbatimStuff("}");
+ Context newcontext(true, context.textclass);
+ if (newinsetlayout->forcePlainLayout())
+ newcontext.layout = &context.textclass.plainLayout();
+ output_ert(os, arg, newcontext);
+ } else
+
+ parse_text_in_inset(p, os, FLAG_ITEM, false, context, newinsetlayout);
end_inset(os);
}
bool termination;
docstring rem;
set<string> req;
- docstring s = encodings.fromLaTeXCommand(from_utf8(t.asInput()),
+ string name = t.asInput();
+ // handle some TIPA special characters
+ if (name == "\\textglobfall") {
+ name = "End";
+ skip_braces(p);
+ }
+ if (name == "\\textdoublevertline") {
+ name = "\\textbardbl";
+ skip_braces(p);
+ }
+ if (name == "\\!" ) {
+ if (p.next_token().asInput() == "b") {
+ p.get_token(); // eat 'b'
+ name = "\\texthtb";
+ skip_braces(p);
+ }
+ if (p.next_token().asInput() == "d") {
+ p.get_token();
+ name = "\\texthtd";
+ skip_braces(p);
+ }
+ if (p.next_token().asInput() == "g") {
+ p.get_token();
+ name = "\\texthtg";
+ skip_braces(p);
+ }
+ if (p.next_token().asInput() == "G") {
+ p.get_token();
+ name = "\\texthtscg";
+ skip_braces(p);
+ }
+ if (p.next_token().asInput() == "j") {
+ p.get_token();
+ name = "\\texthtbardotlessj";
+ skip_braces(p);
+ }
+ if (p.next_token().asInput() == "o") {
+ p.get_token();
+ name = "\\textbullseye";
+ skip_braces(p);
+ }
+ }
+ if (name == "\\*" ) {
+ if (p.next_token().asInput() == "k") {
+ p.get_token();
+ name = "\\textturnk";
+ skip_braces(p);
+ }
+ if (p.next_token().asInput() == "r") {
+ p.get_token(); // eat 'r'
+ name = "\\textturnr";
+ skip_braces(p);
+ }
+ if (p.next_token().asInput() == "t") {
+ p.get_token();
+ name = "\\textturnt";
+ skip_braces(p);
+ }
+ if (p.next_token().asInput() == "w") {
+ p.get_token();
+ name = "\\textturnw";
+ skip_braces(p);
+ }
+ }
+ // now get the character from unicodesymbols
+ docstring s = encodings.fromLaTeXCommand(from_utf8(name),
Encodings::TEXT_CMD, termination, rem, &req);
if (!s.empty()) {
if (!rem.empty())
z = p.verbatim_item();
}
cerr << "found ERT: " << s << endl;
- handle_ert(os, s + ' ', context);
+ output_ert_inset(os, s + ' ', context);
*/
else {
string name = t.asInput();
name += '*';
}
if (!parse_command(name, p, os, outer, context))
- handle_ert(os, name, context);
+ output_ert_inset(os, name, context);
}
}
if (t.cat() == catEscape) {
if (t.cs() == "inputencoding") {
string const enc = subst(p.verbatim_item(), "\n", " ");
- p.setEncoding(enc);
+ p.setEncoding(enc, Encoding::inputenc);
continue;
}
if (t.cs() != "begin")
char const * const * const where =
is_known(encoding, supported_CJK_encodings);
if (where)
- p.setEncoding(encoding);
+ p.setEncoding(encoding, Encoding::CJK);
else
- p.setEncoding("utf8");
- string const text = p.verbatimEnvironment("CJK");
+ p.setEncoding("UTF-8");
+ string const text = p.ertEnvironment("CJK");
p.setEncoding(encoding_old);
p.skip_spaces();
if (!where) {