namespace lyx {
+namespace {
+
+void output_arguments(ostream &, Parser &, bool, bool, bool, Context &,
+ Layout::LaTeXArgMap const &);
+
+}
+
+
void parse_text_in_inset(Parser & p, ostream & os, unsigned flags, bool outer,
Context const & context, InsetLayout const * layout)
{
newcontext.layout = &context.textclass.plainLayout();
else
newcontext.font = context.font;
+ if (layout)
+ output_arguments(os, p, outer, false, false, newcontext,
+ layout->latexargs());
parse_text(p, os, flags, outer, newcontext);
+ if (layout)
+ output_arguments(os, p, outer, false, true, newcontext,
+ layout->postcommandargs());
newcontext.check_end_layout(os);
}
return os.str();
}
+string fboxrule = "";
+string fboxsep = "";
+string shadow_size = "";
char const * const known_ref_commands[] = { "ref", "pageref", "vref",
- "vpageref", "prettyref", "eqref", 0 };
+ "vpageref", "prettyref", "nameref", "eqref", 0 };
char const * const known_coded_ref_commands[] = { "ref", "pageref", "vref",
- "vpageref", "formatted", "eqref", 0 };
+ "vpageref", "formatted", "nameref", "eqref", 0 };
+
+char const * const known_refstyle_commands[] = { "algref", "chapref", "corref",
+ "eqref", "enuref", "figref", "fnref", "lemref", "parref", "partref", "propref",
+ "secref", "subsecref", "tabref", "thmref", 0 };
+
+char const * const known_refstyle_prefixes[] = { "alg", "chap", "cor",
+ "eq", "enu", "fig", "fn", "lem", "par", "part", "prop",
+ "sec", "subsec", "tab", "thm", 0 };
+
+
+/**
+ * supported CJK encodings
+ * JIS does not work with LyX's encoding conversion
+ */
+const char * const supported_CJK_encodings[] = {
+"EUC-JP", "KS", "GB", "UTF8",
+"Bg5", /*"JIS",*/ "SJIS", 0};
+
+/**
+ * the same as supported_CJK_encodings with their corresponding LyX language name
+ * FIXME: The mapping "UTF8" => "chinese-traditional" is only correct for files
+ * created by LyX.
+ * NOTE: "Bg5", "JIS" and "SJIS" are not supported by LyX, on re-export the
+ * encodings "UTF8", "EUC-JP" and "EUC-JP" will be used.
+ * please keep this in sync with supported_CJK_encodings line by line!
+ */
+const char * const supported_CJK_languages[] = {
+"japanese-cjk", "korean", "chinese-simplified", "chinese-traditional",
+"chinese-traditional", /*"japanese-cjk",*/ "japanese-cjk", 0};
/*!
* natbib commands.
"smallcaps", "up", 0};
/// Known special characters which need skip_spaces_braces() afterwards
-char const * const known_special_chars[] = {"ldots", "lyxarrow",
-"textcompwordmark", "slash", 0};
+char const * const known_special_chars[] = {"ldots",
+"lyxarrow", "textcompwordmark",
+"slash", "textasciitilde", "textasciicircum", "textbackslash",
+"LyX", "TeX", "LaTeXe",
+"LaTeX", 0};
+
+/// special characters from known_special_chars which may be preceded by \\protect
+char const * const known_special_protect_chars[] = {"LyX", "TeX",
+"LaTeXe", "LaTeX", 0};
/// the same as known_special_chars with .lyx names
-char const * const known_coded_special_chars[] = {"ldots{}", "menuseparator",
-"textcompwordmark{}", "slash{}", 0};
+char const * const known_coded_special_chars[] = {"\\SpecialChar ldots\n",
+"\\SpecialChar menuseparator\n", "\\SpecialChar ligaturebreak\n",
+"\\SpecialChar breakableslash\n", "~", "^", "\n\\backslash\n",
+"\\SpecialChar LyX\n", "\\SpecialChar TeX\n", "\\SpecialChar LaTeX2e\n",
+"\\SpecialChar LaTeX\n", 0};
/*!
* Graphics file extensions known by the dvips driver of the graphics package.
"hfill{}", "dotfill{}", "hrulefill{}", "leftarrowfill{}", "rightarrowfill{}",
"upbracefill{}", "downbracefill{}", 0};
-/// These are translated by LyX to commands like "\\LyX{}", so we have to put
-/// them in ERT. "LaTeXe" must come before "LaTeX"!
-char const * const known_phrases[] = {"LyX", "TeX", "LaTeXe", "LaTeX", 0};
-char const * const known_coded_phrases[] = {"LyX", "TeX", "LaTeX2e", "LaTeX", 0};
-int const known_phrase_lengths[] = {3, 5, 7, 0};
+/// known TIPA combining diacritical marks
+char const * const known_tipa_marks[] = {"textsubwedge", "textsubumlaut",
+"textsubtilde", "textseagull", "textsubbridge", "textinvsubbridge",
+"textsubsquare", "textsubrhalfring", "textsublhalfring", "textsubplus",
+"textovercross", "textsubarch", "textsuperimposetilde", "textraising",
+"textlowering", "textadvancing", "textretracting", "textdoublegrave",
+"texthighrise", "textlowrise", "textrisefall", "textsyllabic",
+"textsubring", "textsubbar", 0};
+
+/// TIPA tones that need special handling
+char const * const known_tones[] = {"15", "51", "45", "12", "454", 0};
// string to store the float type to be able to determine the type of subfloats
string float_type = "";
bool termination;
docstring rem;
set<string> req;
- docstring parsed = encodings.fromLaTeXCommand(s,
- Encodings::TEXT_CMD, termination, rem, &req);
+ docstring parsed = normalize_c(encodings.fromLaTeXCommand(s,
+ Encodings::TEXT_CMD, termination, rem, &req));
set<string>::const_iterator it = req.begin();
set<string>::const_iterator en = req.end();
for (; it != en; ++it)
}
-void handle_backslash(ostream & os, string const & s)
+void output_ert(ostream & os, string const & s, Context & context)
{
+ context.check_layout(os);
for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
if (*it == '\\')
os << "\n\\backslash\n";
- else
+ else if (*it == '\n') {
+ context.new_paragraph(os);
+ context.check_layout(os);
+ } else
os << *it;
}
+ context.check_end_layout(os);
}
-void handle_ert(ostream & os, string const & s, Context & context)
+void output_ert_inset(ostream & os, string const & s, Context & context)
{
// We must have a valid layout before outputting the ERT inset.
context.check_layout(os);
Context newcontext(true, context.textclass);
+ InsetLayout const & layout = context.textclass.insetLayout(from_ascii("ERT"));
+ if (layout.forcePlainLayout())
+ newcontext.layout = &context.textclass.plainLayout();
begin_inset(os, "ERT");
os << "\nstatus collapsed\n";
- newcontext.check_layout(os);
- for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
- if (*it == '\\')
- os << "\n\\backslash\n";
- else if (*it == '\n') {
- newcontext.new_paragraph(os);
- newcontext.check_layout(os);
- } else
- os << *it;
- }
- newcontext.check_end_layout(os);
+ output_ert(os, s, newcontext);
end_inset(os);
}
-void handle_comment(ostream & os, string const & s, Context & context)
+void output_comment(Parser & p, ostream & os, string const & s,
+ Context & context)
{
- // TODO: Handle this better
- Context newcontext(true, context.textclass);
- begin_inset(os, "ERT");
- os << "\nstatus collapsed\n";
- newcontext.check_layout(os);
- handle_backslash(os, s);
- // make sure that our comment is the last thing on the line
- newcontext.new_paragraph(os);
- newcontext.check_layout(os);
- newcontext.check_end_layout(os);
- end_inset(os);
+ if (p.next_token().cat() == catNewline)
+ output_ert_inset(os, '%' + s, context);
+ else
+ output_ert_inset(os, '%' + s + '\n', context);
}
}
+void output_arguments(ostream & os, Parser & p, bool outer, bool need_layout, bool post,
+ Context & context, Layout::LaTeXArgMap const & latexargs)
+{
+ if (need_layout) {
+ context.check_layout(os);
+ need_layout = false;
+ } else
+ need_layout = true;
+ int i = 0;
+ Layout::LaTeXArgMap::const_iterator lait = latexargs.begin();
+ Layout::LaTeXArgMap::const_iterator const laend = latexargs.end();
+ for (; lait != laend; ++lait) {
+ ++i;
+ eat_whitespace(p, os, context, false);
+ if (lait->second.mandatory) {
+ if (p.next_token().cat() != catBegin)
+ break;
+ p.get_token(); // eat '{'
+ if (need_layout) {
+ context.check_layout(os);
+ need_layout = false;
+ }
+ begin_inset(os, "Argument ");
+ if (post)
+ os << "post:";
+ os << i << "\nstatus collapsed\n\n";
+ parse_text_in_inset(p, os, FLAG_BRACE_LAST, outer, context);
+ end_inset(os);
+ } else {
+ if (p.next_token().cat() == catEscape ||
+ p.next_token().character() != '[')
+ continue;
+ p.get_token(); // eat '['
+ if (need_layout) {
+ context.check_layout(os);
+ need_layout = false;
+ }
+ begin_inset(os, "Argument ");
+ if (post)
+ os << "post:";
+ os << i << "\nstatus collapsed\n\n";
+ parse_text_in_inset(p, os, FLAG_BRACK_LAST, outer, context);
+ end_inset(os);
+ }
+ eat_whitespace(p, os, context, false);
+ }
+}
+
+
void output_command_layout(ostream & os, Parser & p, bool outer,
Context & parent_context,
Layout const * newlayout)
context.need_end_deeper = true;
}
context.check_deeper(os);
- context.check_layout(os);
- unsigned int optargs = 0;
- while (optargs < context.layout->optargs) {
- eat_whitespace(p, os, context, false);
- if (p.next_token().cat() == catEscape ||
- p.next_token().character() != '[')
- break;
- p.get_token(); // eat '['
- begin_inset(os, "Argument\n");
- os << "status collapsed\n\n";
- parse_text_in_inset(p, os, FLAG_BRACK_LAST, outer, context);
- end_inset(os);
- eat_whitespace(p, os, context, false);
- ++optargs;
- }
- unsigned int reqargs = 0;
- while (reqargs < context.layout->reqargs) {
- eat_whitespace(p, os, context, false);
- if (p.next_token().cat() != catBegin)
- break;
- p.get_token(); // eat '{'
- begin_inset(os, "Argument\n");
- os << "status collapsed\n\n";
- parse_text_in_inset(p, os, FLAG_BRACE_LAST, outer, context);
- end_inset(os);
- eat_whitespace(p, os, context, false);
- ++reqargs;
- }
+ output_arguments(os, p, outer, true, false, context,
+ context.layout->latexargs());
parse_text(p, os, FLAG_ITEM, outer, context);
+ output_arguments(os, p, outer, false, true, context,
+ context.layout->postcommandargs());
context.check_end_layout(os);
if (parent_context.deeper_paragraph) {
// We must suppress the "end deeper" because we
case required:
case req_group:
// This argument contains regular LaTeX
- handle_ert(os, ert + '{', context);
+ output_ert_inset(os, ert + '{', context);
eat_whitespace(p, os, context, false);
if (template_arguments[i] == required)
parse_text(p, os, FLAG_ITEM, outer, context);
break;
}
}
- handle_ert(os, ert, context);
+ output_ert_inset(os, ert, context);
}
void parse_box(Parser & p, ostream & os, unsigned outer_flags,
unsigned inner_flags, bool outer, Context & parent_context,
string const & outer_type, string const & special,
- string const & inner_type)
+ string inner_type, string const & frame_color,
+ string const & background_color)
{
string position;
string inner_pos;
- string hor_pos = "c";
+ string hor_pos = "l";
// We need to set the height to the LaTeX default of 1\\totalheight
// for the case when no height argument is given
string height_value = "1";
string width_unit;
string latex_width;
string width_special = "none";
+ string thickness = "0.4pt";
+ if (!fboxrule.empty())
+ thickness = fboxrule;
+ else
+ thickness = "0.4pt";
+ string separation;
+ if (!fboxsep.empty())
+ separation = fboxsep;
+ else
+ separation = "3pt";
+ string shadowsize;
+ if (!shadow_size.empty())
+ shadowsize = shadow_size;
+ else
+ shadowsize = "4pt";
+ string framecolor = "black";
+ string backgroundcolor = "none";
+ if (!frame_color.empty())
+ framecolor = frame_color;
+ if (!background_color.empty())
+ backgroundcolor = background_color;
+	// if there is a color box, the \begin statements have not yet been parsed,
+	// so do this now
+ if (!frame_color.empty() || !background_color.empty()) {
+ eat_whitespace(p, os, parent_context, false);
+ p.get_token().asInput(); // the '{'
+ // parse minipage
+ if (p.next_token().asInput() == "\\begin") {
+ p.get_token().asInput();
+ p.getArg('{', '}');
+ inner_type = "minipage";
+ inner_flags = FLAG_END;
+ active_environments.push_back("minipage");
+ }
+ // parse parbox
+ else if (p.next_token().asInput() == "\\parbox") {
+ p.get_token().asInput();
+ inner_type = "parbox";
+ inner_flags = FLAG_ITEM;
+ }
+ // parse makebox
+ else if (p.next_token().asInput() == "\\makebox") {
+ p.get_token().asInput();
+ inner_type = "makebox";
+ inner_flags = FLAG_ITEM;
+ }
+ // in case there is just \colorbox{color}{text}
+ else {
+ latex_width = "";
+ inner_type = "makebox";
+ inner_flags = FLAG_BRACE_LAST;
+ position = "t";
+ inner_pos = "t";
+ }
+ }
+ if (!p.hasOpt() && (inner_type == "makebox" || outer_type == "mbox"))
+ hor_pos = "c";
if (!inner_type.empty() && p.hasOpt()) {
if (inner_type != "makebox")
position = p.getArg('[', ']');
if (inner_type != "makebox") {
latex_height = p.getArg('[', ']');
translate_box_len(latex_height, height_value, height_unit, height_special);
- } else
- hor_pos = p.getArg('[', ']');
+ } else {
+ string const opt = p.getArg('[', ']');
+ if (!opt.empty()) {
+ hor_pos = opt;
+ if (hor_pos != "l" && hor_pos != "c" &&
+ hor_pos != "r" && hor_pos != "s") {
+ cerr << "invalid hor_pos " << hor_pos
+ << " for " << inner_type << endl;
+ hor_pos = "c";
+ }
+ }
+ }
if (p.hasOpt()) {
inner_pos = p.getArg('[', ']');
inner_pos = position;
}
}
+ } else {
+ if (inner_type == "makebox")
+ hor_pos = "c";
}
}
if (inner_type.empty()) {
if (!opt.empty()) {
hor_pos = opt;
if (hor_pos != "l" && hor_pos != "c" &&
- hor_pos != "r") {
+ hor_pos != "r" && hor_pos != "s") {
cerr << "invalid hor_pos " << hor_pos
<< " for " << outer_type << endl;
hor_pos = "c";
}
+ } else {
+ if (outer_type == "framebox")
+ hor_pos = "c";
}
}
} else if (inner_type != "makebox")
if (!outer_type.empty() && !inner_type.empty() &&
(inner_flags & FLAG_END))
active_environments.push_back(inner_type);
- // LyX can't handle length variables
- bool use_ert = contains(width_unit, '\\') || contains(height_unit, '\\');
- if (!use_ert && !outer_type.empty() && !inner_type.empty()) {
+ bool use_ert = false;
+ if (!outer_type.empty() && !inner_type.empty()) {
// Look whether there is some content after the end of the
// inner box, but before the end of the outer box.
// If yes, we need to output ERT.
p.pushPosition();
if (inner_flags & FLAG_END)
- p.verbatimEnvironment(inner_type);
+ p.ertEnvironment(inner_type);
else
p.verbatim_item();
p.skip_spaces(true);
}
p.popPosition();
}
- // if only \makebox{content} was used we can set its width to 1\width
- // because this identic and also identic to \mbox
- // this doesn't work for \framebox{content}, thus we have to use ERT for this
- if (latex_width.empty() && inner_type == "makebox") {
- width_value = "1";
- width_unit = "in";
- width_special = "width";
- } else if (latex_width.empty() && outer_type == "framebox") {
- use_ert = true;
- }
+
if (use_ert) {
ostringstream ss;
if (!outer_type.empty()) {
}
if (inner_type == "shaded")
ss << "\\begin{shaded}";
- handle_ert(os, ss.str(), parent_context);
+ output_ert_inset(os, ss.str(), parent_context);
if (!inner_type.empty()) {
parse_text(p, os, inner_flags, outer, parent_context);
if (inner_flags & FLAG_END)
- handle_ert(os, "\\end{" + inner_type + '}',
+ output_ert_inset(os, "\\end{" + inner_type + '}',
parent_context);
else
- handle_ert(os, "}", parent_context);
+ output_ert_inset(os, "}", parent_context);
}
if (!outer_type.empty()) {
// If we already read the inner box we have to pop
}
parse_text(p, os, outer_flags, outer, parent_context);
if (outer_flags & FLAG_END)
- handle_ert(os, "\\end{" + outer_type + '}',
+ output_ert_inset(os, "\\end{" + outer_type + '}',
parent_context);
- else if (inner_type.empty() && outer_type == "framebox")
- // in this case it is already closed later
- ;
else
- handle_ert(os, "}", parent_context);
+ output_ert_inset(os, "}", parent_context);
}
} else {
// LyX does not like empty positions, so we have
begin_inset(os, "Box ");
if (outer_type == "framed")
os << "Framed\n";
- else if (outer_type == "framebox")
+ else if (outer_type == "framebox" || outer_type == "fbox" || !frame_color.empty())
os << "Boxed\n";
else if (outer_type == "shadowbox")
os << "Shadowbox\n";
preamble.registerAutomaticallyLoadedPackage("color");
} else if (outer_type == "doublebox")
os << "Doublebox\n";
- else if (outer_type.empty())
+ else if (outer_type.empty() || outer_type == "mbox")
os << "Frameless\n";
else
os << outer_type << '\n';
os << "position \"" << position << "\"\n";
os << "hor_pos \"" << hor_pos << "\"\n";
- os << "has_inner_box " << !inner_type.empty() << "\n";
+ if (outer_type == "mbox")
+ os << "has_inner_box 1\n";
+ else if (!frame_color.empty() && inner_type == "makebox")
+ os << "has_inner_box 0\n";
+ else
+ os << "has_inner_box " << !inner_type.empty() << "\n";
os << "inner_pos \"" << inner_pos << "\"\n";
os << "use_parbox " << (inner_type == "parbox" || shadedparbox)
<< '\n';
- os << "use_makebox " << (inner_type == "makebox") << '\n';
- os << "width \"" << width_value << width_unit << "\"\n";
- os << "special \"" << width_special << "\"\n";
- os << "height \"" << height_value << height_unit << "\"\n";
+ if (outer_type == "mbox")
+ os << "use_makebox 1\n";
+ else if (!frame_color.empty())
+ os << "use_makebox 0\n";
+ else
+ os << "use_makebox " << (inner_type == "makebox") << '\n';
+ if (outer_type == "mbox" || (outer_type == "fbox" && inner_type.empty()))
+ os << "width \"\"\n";
+	// for values like "1.5\width" LyX uses "1.5in" as width and sets "width" as special
+ else if (contains(width_unit, '\\'))
+ os << "width \"" << width_value << "in" << "\"\n";
+ else
+ os << "width \"" << width_value << width_unit << "\"\n";
+ if (contains(width_unit, '\\')) {
+ width_unit.erase (0,1); // remove the leading '\'
+ os << "special \"" << width_unit << "\"\n";
+ } else
+ os << "special \"" << width_special << "\"\n";
+ if (contains(height_unit, '\\'))
+ os << "height \"" << height_value << "in" << "\"\n";
+ else
+ os << "height \"" << height_value << height_unit << "\"\n";
os << "height_special \"" << height_special << "\"\n";
+ os << "thickness \"" << thickness << "\"\n";
+ os << "separation \"" << separation << "\"\n";
+ os << "shadowsize \"" << shadowsize << "\"\n";
+ os << "framecolor \"" << framecolor << "\"\n";
+ os << "backgroundcolor \"" << backgroundcolor << "\"\n";
os << "status open\n\n";
// Unfortunately we can't use parse_text_in_inset:
// LyX puts a % after the end of the minipage
if (p.next_token().cat() == catNewline && p.next_token().cs().size() > 1) {
// new paragraph
- //handle_comment(os, "%dummy", parent_context);
+ //output_comment(p, os, "dummy", parent_context);
p.get_token();
p.skip_spaces();
parent_context.new_paragraph(os);
}
else if (p.next_token().cat() == catSpace || p.next_token().cat() == catNewline) {
- //handle_comment(os, "%dummy", parent_context);
+ //output_comment(p, os, "dummy", parent_context);
p.get_token();
p.skip_spaces();
// We add a protected space if something real follows
}
#endif
}
+ if (inner_type == "minipage" && (!frame_color.empty() || !background_color.empty()))
+ active_environments.pop_back();
+ if (inner_flags != FLAG_BRACE_LAST && (!frame_color.empty() || !background_color.empty())) {
+		// in this case we have to eat the closing brace of the color box
+ p.get_token().asInput(); // the '}'
+ }
+ if (p.next_token().asInput() == "}") {
+ // in this case we assume that the closing brace is from the box settings
+ // therefore reset these values for the next box
+ fboxrule = "";
+ fboxsep = "";
+ shadow_size = "";
+ }
+
+	// all boxes except Frameless and Shaded require calc
+ if (!(outer_type.empty() || outer_type == "mbox") &&
+ !((outer_type == "shaded" && inner_type.empty()) ||
+ (outer_type == "minipage" && inner_type == "shaded") ||
+ (outer_type == "parbox" && inner_type == "shaded")))
+ preamble.registerAutomaticallyLoadedPackage("calc");
}
p.skip_spaces(true);
}
}
- if (outer_type == "shaded") {
+ if (outer_type == "shaded" || outer_type == "mbox") {
// These boxes never have an inner box
;
} else if (p.next_token().asInput() == "\\parbox") {
eat_whitespace(p, os, parent_context, false);
}
parse_box(p, os, flags, FLAG_END, outer, parent_context,
- outer_type, special, inner);
+ outer_type, special, inner, "", "");
} else {
if (inner_flags == FLAG_ITEM) {
p.get_token();
eat_whitespace(p, os, parent_context, false);
}
parse_box(p, os, flags, inner_flags, outer, parent_context,
- outer_type, special, inner);
+ outer_type, special, inner, "", "");
}
}
if (p.hasOpt()) {
string arg = p.verbatimOption();
os << "lstparams " << '"' << arg << '"' << '\n';
+ if (arg.find("\\color") != string::npos)
+ preamble.registerAutomaticallyLoadedPackage("color");
}
if (in_line)
os << "inline true\n";
context.layout = &parent_context.textclass.plainLayout();
string s;
if (in_line) {
- s = p.plainCommand('!', '!', "lstinline");
- context.new_paragraph(os);
- context.check_layout(os);
+ // set catcodes to verbatim early, just in case.
+ p.setCatcodes(VERBATIM_CATCODES);
+ string delim = p.get_token().asInput();
+		//FIXME: handle the error condition
+ s = p.verbatimStuff(delim).second;
+// context.new_paragraph(os);
} else
- s = p.plainEnvironment("lstlisting");
- for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
- if (*it == '\\')
- os << "\n\\backslash\n";
- else if (*it == '\n') {
- // avoid adding an empty paragraph at the end
- if (it + 1 != et) {
- context.new_paragraph(os);
- context.check_layout(os);
- }
- } else
- os << *it;
- }
- context.check_end_layout(os);
+ s = p.verbatimEnvironment("lstlisting");
+ output_ert(os, s, context);
end_inset(os);
}
bool const new_layout_allowed = parent_context.new_layout_allowed;
if (specialfont)
parent_context.new_layout_allowed = false;
- handle_ert(os, "\\begin{" + name + "}", parent_context);
+ output_ert_inset(os, "\\begin{" + name + "}", parent_context);
parse_text_snippet(p, os, flags, outer, parent_context);
- handle_ert(os, "\\end{" + name + "}", parent_context);
+ output_ert_inset(os, "\\end{" + name + "}", parent_context);
if (specialfont)
parent_context.new_layout_allowed = new_layout_allowed;
}
}
}
+ else if (is_known(name, preamble.polyglossia_languages)) {
+ // We must begin a new paragraph if not already done
+ if (! parent_context.atParagraphStart()) {
+ parent_context.check_end_layout(os);
+ parent_context.new_paragraph(os);
+ }
+ // save the language in the context so that it is
+ // handled by parse_text
+ parent_context.font.language = preamble.polyglossia2lyx(name);
+ parse_text(p, os, FLAG_END, outer, parent_context);
+ // Just in case the environment is empty
+ parent_context.extra_stuff.erase();
+ // We must begin a new paragraph to reset the language
+ parent_context.new_paragraph(os);
+ p.skip_spaces();
+ }
+
else if (unstarred_name == "tabular" || name == "longtable") {
eat_whitespace(p, os, parent_context, false);
string width = "0pt";
if (!opt.empty())
os << "placement " << opt << '\n';
if (contains(opt, "H"))
- preamble.registerAutomaticallyLoadedPackage("float");
+ preamble.registerAutomaticallyLoadedPackage("float");
else {
Floating const & fl = parent_context.textclass.floats()
- .getType(unstarred_name);
- if (!fl.floattype().empty() && fl.usesFloatPkg())
- preamble.registerAutomaticallyLoadedPackage("float");
+ .getType(unstarred_name);
+ if (!fl.floattype().empty() && fl.usesFloatPkg())
+ preamble.registerAutomaticallyLoadedPackage("float");
}
os << "wide " << convert<string>(is_starred)
parent_context, name, "shaded");
else
parse_box(p, os, 0, FLAG_END, outer, parent_context,
- "", "", name);
+ "", "", name, "", "");
p.skip_spaces();
}
preamble.registerAutomaticallyLoadedPackage("verbatim");
}
- else if (name == "verbatim") {
- os << "\n\\end_layout\n\n\\begin_layout Verbatim\n";
- string const s = p.plainEnvironment("verbatim");
- string::const_iterator it2 = s.begin();
- for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
- if (*it == '\\')
- os << "\\backslash ";
- else if (*it == '\n') {
- it2 = it + 1;
- // avoid adding an empty paragraph at the end
- // FIXME: if there are 2 consecutive spaces at the end ignore it
- // because LyX will re-add a \n
- // This hack must be removed once bug 8049 is fixed!
- if ((it + 1 != et) && (it + 2 != et || *it2 != '\n'))
- os << "\n\\end_layout\n\\begin_layout Verbatim\n";
- } else
- os << *it;
- }
- os << "\n\\end_layout\n\n";
+ else if (unstarred_name == "verbatim") {
+ // FIXME: this should go in the generic code that
+ // handles environments defined in layout file that
+ // have "PassThru 1". However, the code over there is
+ // already too complicated for my taste.
+ string const ascii_name =
+ (name == "verbatim*") ? "Verbatim*" : "Verbatim";
+ parent_context.new_paragraph(os);
+ Context context(true, parent_context.textclass,
+ &parent_context.textclass[from_ascii(ascii_name)]);
+ string s = p.verbatimEnvironment(name);
+ output_ert(os, s, context);
+ p.skip_spaces();
+ }
+
+ else if (name == "IPA") {
+ eat_whitespace(p, os, parent_context, false);
+ parent_context.check_layout(os);
+ begin_inset(os, "IPA\n");
+ parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
+ end_inset(os);
+ p.skip_spaces();
+ preamble.registerAutomaticallyLoadedPackage("tipa");
+ preamble.registerAutomaticallyLoadedPackage("tipx");
+ }
+
+ else if (name == "CJK") {
+ // the scheme is \begin{CJK}{encoding}{mapping}text\end{CJK}
+ // It is impossible to decide if a CJK environment was in its own paragraph or within
+ // a line. We therefore always assume a paragraph since the latter is a rare case.
+ eat_whitespace(p, os, parent_context, false);
+ parent_context.check_end_layout(os);
+ // store the encoding to be able to reset it
+ string const encoding_old = p.getEncoding();
+ string const encoding = p.getArg('{', '}');
+ // FIXME: For some reason JIS does not work. Although the text
+ // in tests/CJK.tex is identical with the SJIS version if you
+ // convert both snippets using the recode command line utility,
+ // the resulting .lyx file contains some extra characters if
+ // you set buggy_encoding to false for JIS.
+ bool const buggy_encoding = encoding == "JIS";
+ if (!buggy_encoding)
+ p.setEncoding(encoding, Encoding::CJK);
+ else {
+ // FIXME: This will read garbage, since the data is not encoded in utf8.
+ p.setEncoding("UTF-8");
+ }
+ // LyX only supports the same mapping for all CJK
+ // environments, so we might need to output everything as ERT
+ string const mapping = trim(p.getArg('{', '}'));
+ char const * const * const where =
+ is_known(encoding, supported_CJK_encodings);
+ if (!buggy_encoding && !preamble.fontCJKSet())
+ preamble.fontCJK(mapping);
+ bool knownMapping = mapping == preamble.fontCJK();
+ if (buggy_encoding || !knownMapping || !where) {
+ parent_context.check_layout(os);
+ output_ert_inset(os, "\\begin{" + name + "}{" + encoding + "}{" + mapping + "}",
+ parent_context);
+ // we must parse the content as verbatim because e.g. JIS can contain
+ // normally invalid characters
+ // FIXME: This works only for the most simple cases.
+ // Since TeX control characters are not parsed,
+ // things like comments are completely wrong.
+ string const s = p.plainEnvironment("CJK");
+ for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
+ if (*it == '\\')
+ output_ert_inset(os, "\\", parent_context);
+ else if (*it == '$')
+ output_ert_inset(os, "$", parent_context);
+ else if (*it == '\n' && it + 1 != et && s.begin() + 1 != it)
+ os << "\n ";
+ else
+ os << *it;
+ }
+ output_ert_inset(os, "\\end{" + name + "}",
+ parent_context);
+ } else {
+ string const lang =
+ supported_CJK_languages[where - supported_CJK_encodings];
+ // store the language because we must reset it at the end
+ string const lang_old = parent_context.font.language;
+ parent_context.font.language = lang;
+ parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
+ parent_context.font.language = lang_old;
+ parent_context.new_paragraph(os);
+ }
+ p.setEncoding(encoding_old);
p.skip_spaces();
- // reset to Standard layout
- os << "\n\\begin_layout Standard\n";
}
else if (name == "lyxgreyedout") {
preamble.registerAutomaticallyLoadedPackage("color");
}
+ else if (name == "btSect") {
+ eat_whitespace(p, os, parent_context, false);
+ parent_context.check_layout(os);
+ begin_command_inset(os, "bibtex", "bibtex");
+ string bibstyle = "plain";
+ if (p.hasOpt()) {
+ bibstyle = p.getArg('[', ']');
+ p.skip_spaces(true);
+ }
+ string const bibfile = p.getArg('{', '}');
+ eat_whitespace(p, os, parent_context, false);
+ Token t = p.get_token();
+ if (t.asInput() == "\\btPrintCited") {
+ p.skip_spaces(true);
+ os << "btprint " << '"' << "btPrintCited" << '"' << "\n";
+ }
+ if (t.asInput() == "\\btPrintNotCited") {
+ p.skip_spaces(true);
+ os << "btprint " << '"' << "btPrintNotCited" << '"' << "\n";
+ }
+ if (t.asInput() == "\\btPrintAll") {
+ p.skip_spaces(true);
+ os << "btprint " << '"' << "btPrintAll" << '"' << "\n";
+ }
+ os << "bibfiles " << '"' << bibfile << '"' << "\n";
+ os << "options " << '"' << bibstyle << '"' << "\n";
+ parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
+ end_inset(os);
+ p.skip_spaces();
+ }
+
else if (name == "framed" || name == "shaded") {
eat_whitespace(p, os, parent_context, false);
parse_outer_box(p, os, FLAG_END, outer, parent_context, name, "");
p.skip_spaces();
+ preamble.registerAutomaticallyLoadedPackage("framed");
}
else if (name == "lstlisting") {
eat_whitespace(p, os, parent_context, false);
- // FIXME handle the automatic color package loading
- // uwestoehr asks: In what case color is loaded?
parse_listings(p, os, parent_context, false);
p.skip_spaces();
}
if (last_env == name) {
// we need to output a separator since LyX would export
// the two environments as one otherwise (bug 5716)
- docstring const sep = from_ascii("--Separator--");
TeX2LyXDocClass const & textclass(parent_context.textclass);
- if (textclass.hasLayout(sep)) {
- Context newcontext(parent_context);
- newcontext.layout = &(textclass[sep]);
- newcontext.check_layout(os);
- newcontext.check_end_layout(os);
- } else {
- parent_context.check_layout(os);
- begin_inset(os, "Note Note\n");
- os << "status closed\n";
- Context newcontext(true, textclass,
- &(textclass.defaultLayout()));
- newcontext.check_layout(os);
- newcontext.check_end_layout(os);
- end_inset(os);
- parent_context.check_end_layout(os);
- }
+ Context newcontext(true, textclass,
+ &(textclass.defaultLayout()));
+ newcontext.check_layout(os);
+ begin_inset(os, "Separator plain\n");
+ end_inset(os);
+ newcontext.check_end_layout(os);
}
switch (context.layout->latextype) {
case LATEX_LIST_ENVIRONMENT:
}
context.check_deeper(os);
// handle known optional and required arguments
- // layouts require all optional arguments before the required ones
// Unfortunately LyX can't handle arguments of list arguments (bug 7468):
// It is impossible to place anything after the environment name,
// but before the first \\item.
- if (context.layout->latextype == LATEX_ENVIRONMENT) {
- bool need_layout = true;
- unsigned int optargs = 0;
- while (optargs < context.layout->optargs) {
- eat_whitespace(p, os, context, false);
- if (p.next_token().cat() == catEscape ||
- p.next_token().character() != '[')
- break;
- p.get_token(); // eat '['
- if (need_layout) {
- context.check_layout(os);
- need_layout = false;
- }
- begin_inset(os, "Argument\n");
- os << "status collapsed\n\n";
- parse_text_in_inset(p, os, FLAG_BRACK_LAST, outer, context);
- end_inset(os);
- eat_whitespace(p, os, context, false);
- ++optargs;
- }
- unsigned int reqargs = 0;
- while (reqargs < context.layout->reqargs) {
- eat_whitespace(p, os, context, false);
- if (p.next_token().cat() != catBegin)
- break;
- p.get_token(); // eat '{'
- if (need_layout) {
- context.check_layout(os);
- need_layout = false;
- }
- begin_inset(os, "Argument\n");
- os << "status collapsed\n\n";
- parse_text_in_inset(p, os, FLAG_BRACE_LAST, outer, context);
- end_inset(os);
- eat_whitespace(p, os, context, false);
- ++reqargs;
- }
- }
+ if (context.layout->latextype == LATEX_ENVIRONMENT)
+ output_arguments(os, p, outer, false, false, context,
+ context.layout->latexargs());
parse_text(p, os, FLAG_END, outer, context);
+ if (context.layout->latextype == LATEX_ENVIRONMENT)
+ output_arguments(os, p, outer, false, true, context,
+ context.layout->postcommandargs());
context.check_end_layout(os);
if (parent_context.deeper_paragraph) {
// We must suppress the "end deeper" because we
begin_inset(os, "Flex ");
os << to_utf8(newinsetlayout->name()) << '\n'
<< "status collapsed\n";
- parse_text_in_inset(p, os, FLAG_END, false, parent_context, newinsetlayout);
+ if (newinsetlayout->isPassThru()) {
+ string const arg = p.verbatimEnvironment(name);
+ Context context(true, parent_context.textclass,
+ &parent_context.textclass.plainLayout(),
+ parent_context.layout);
+ output_ert(os, arg, parent_context);
+ } else
+ parse_text_in_inset(p, os, FLAG_END, false, parent_context, newinsetlayout);
end_inset(os);
}
parse_arguments("\\begin{" + name + "}", arguments, p, os,
outer, parent_context);
if (contents == verbatim)
- handle_ert(os, p.verbatimEnvironment(name),
+ output_ert_inset(os, p.ertEnvironment(name),
parent_context);
else
parse_text_snippet(p, os, FLAG_END, outer,
parent_context);
- handle_ert(os, "\\end{" + name + "}", parent_context);
+ output_ert_inset(os, "\\end{" + name + "}", parent_context);
if (specialfont)
parent_context.new_layout_allowed = new_layout_allowed;
}
LASSERT(t.cat() == catComment, return);
if (!t.cs().empty()) {
context.check_layout(os);
- handle_comment(os, '%' + t.cs(), context);
+ output_comment(p, os, t.cs(), context);
if (p.next_token().cat() == catNewline) {
// A newline after a comment line starts a new
// paragraph
// done (we might get called recursively)
context.new_paragraph(os);
} else
- handle_ert(os, "\n", context);
+ output_ert_inset(os, "\n", context);
eat_whitespace(p, os, context, true);
}
} else {
/// can understand
string const normalize_filename(string const & name)
{
- Parser p(trim(name, "\""));
+ Parser p(name);
ostringstream os;
while (p.good()) {
Token const & t = p.get_token();
} else if (t.cs() == "space") {
os << ' ';
p.skip_spaces();
+ } else if (t.cs() == "string") {
+ // Convert \string" to " and \string~ to ~
+ Token const & n = p.next_token();
+ if (n.asInput() != "\"" && n.asInput() != "~")
+ os << t.asInput();
} else
os << t.asInput();
}
- return os.str();
+ // Strip quotes. This is a bit complicated (see latex_path()).
+ string full = os.str();
+ if (!full.empty() && full[0] == '"') {
+ string base = removeExtension(full);
+ string ext = getExtension(full);
+ if (!base.empty() && base[base.length()-1] == '"')
+ // "a b"
+ // "a b".tex
+ return addExtension(trim(base, "\""), ext);
+ if (full[full.length()-1] == '"')
+ // "a b.c"
+ // "a b.c".tex
+ return trim(full, "\"");
+ }
+ return full;
}
/// Convert \p name from TeX convention (relative to master file) to LyX
/// convention (relative to .lyx file) if it is relative
-void fix_relative_filename(string & name)
+void fix_child_filename(string & name)
{
- if (FileName::isAbsolute(name))
- return;
-
- name = to_utf8(makeRelPath(from_utf8(makeAbsPath(name, getMasterFilePath()).absFileName()),
- from_utf8(getParentFilePath())));
+ string const absMasterTeX = getMasterFilePath(true);
+ bool const isabs = FileName::isAbsolute(name);
+ // convert from "relative to .tex master" to absolute original path
+ if (!isabs)
+ name = makeAbsPath(name, absMasterTeX).absFileName();
+ bool copyfile = copyFiles();
+ string const absParentLyX = getParentFilePath(false);
+ string abs = name;
+ if (copyfile) {
+ // convert from absolute original path to "relative to master file"
+ string const rel = to_utf8(makeRelPath(from_utf8(name),
+ from_utf8(absMasterTeX)));
+ // re-interpret "relative to .tex file" as "relative to .lyx file"
+ // (is different if the master .lyx file resides in a
+ // different path than the master .tex file)
+ string const absMasterLyX = getMasterFilePath(false);
+ abs = makeAbsPath(rel, absMasterLyX).absFileName();
+ // Do not copy if the new path is impossible to create. Example:
+ // absMasterTeX = "/foo/bar/"
+ // absMasterLyX = "/bar/"
+ // name = "/baz.eps" => new absolute name would be "/../baz.eps"
+ if (contains(name, "/../"))
+ copyfile = false;
+ }
+ if (copyfile) {
+ if (isabs)
+ name = abs;
+ else {
+ // convert from absolute original path to
+ // "relative to .lyx file"
+ name = to_utf8(makeRelPath(from_utf8(abs),
+ from_utf8(absParentLyX)));
+ }
+ }
+ else if (!isabs) {
+ // convert from absolute original path to "relative to .lyx file"
+ name = to_utf8(makeRelPath(from_utf8(name),
+ from_utf8(absParentLyX)));
+ }
}
-/// Parse a NoWeb Scrap section. The initial "<<" is already parsed.
-void parse_noweb(Parser & p, ostream & os, Context & context)
+void copy_file(FileName const & src, string dstname)
{
- // assemble the rest of the keyword
- string name("<<");
- bool scrap = false;
- while (p.good()) {
- Token const & t = p.get_token();
- if (t.asInput() == ">" && p.next_token().asInput() == ">") {
- name += ">>";
- p.get_token();
- scrap = (p.good() && p.next_token().asInput() == "=");
- if (scrap)
- name += p.get_token().asInput();
- break;
+ if (!copyFiles())
+ return;
+ string const absParent = getParentFilePath(false);
+ FileName dst;
+ if (FileName::isAbsolute(dstname))
+ dst = FileName(dstname);
+ else
+ dst = makeAbsPath(dstname, absParent);
+ FileName const srcpath = src.onlyPath();
+ FileName const dstpath = dst.onlyPath();
+ if (equivalent(srcpath, dstpath))
+ return;
+ if (!dstpath.isDirectory()) {
+ if (!dstpath.createPath()) {
+ cerr << "Warning: Could not create directory for file `"
+ << dst.absFileName() << "´." << endl;
+ return;
+ }
+ }
+ if (dst.isReadableFile()) {
+ if (overwriteFiles())
+ cerr << "Warning: Overwriting existing file `"
+ << dst.absFileName() << "´." << endl;
+ else {
+ cerr << "Warning: Not overwriting existing file `"
+ << dst.absFileName() << "´." << endl;
+ return;
}
- name += t.asInput();
}
+ if (!src.copyTo(dst))
+ cerr << "Warning: Could not copy file `" << src.absFileName()
+ << "´ to `" << dst.absFileName() << "´." << endl;
+}
- if (!scrap || !context.new_layout_allowed ||
- !context.textclass.hasLayout(from_ascii("Scrap"))) {
- cerr << "Warning: Could not interpret '" << name
- << "'. Ignoring it." << endl;
- return;
+
+/// Parse a literate Chunk section. The initial "<<" is already parsed.
+bool parse_chunk(Parser & p, ostream & os, Context & context)
+{
+ // check whether a chunk is possible here.
+ if (!context.textclass.hasInsetLayout(from_ascii("Flex:Chunk"))) {
+ return false;
}
- // We use new_paragraph instead of check_end_layout because the stuff
- // following the noweb chunk needs to start with a \begin_layout.
- // This may create a new paragraph even if there was none in the
- // noweb file, but the alternative is an invalid LyX file. Since
- // noweb code chunks are implemented with a layout style in LyX they
- // always must be in an own paragraph.
- context.new_paragraph(os);
- Context newcontext(true, context.textclass,
- &context.textclass[from_ascii("Scrap")]);
- newcontext.check_layout(os);
- os << name;
- while (p.good()) {
- Token const & t = p.get_token();
- // We abuse the parser a bit, because this is no TeX syntax
- // at all.
- if (t.cat() == catEscape)
- os << subst(t.asInput(), "\\", "\n\\backslash\n");
- else {
- ostringstream oss;
- Context tmp(false, context.textclass,
- &context.textclass[from_ascii("Scrap")]);
- tmp.need_end_layout = true;
- tmp.check_layout(oss);
- os << subst(t.asInput(), "\n", oss.str());
- }
- // The scrap chunk is ended by an @ at the beginning of a line.
- // After the @ the line may contain a comment and/or
- // whitespace, but nothing else.
- if (t.asInput() == "@" && p.prev_token().cat() == catNewline &&
- (p.next_token().cat() == catSpace ||
- p.next_token().cat() == catNewline ||
- p.next_token().cat() == catComment)) {
- while (p.good() && p.next_token().cat() == catSpace)
- os << p.get_token().asInput();
- if (p.next_token().cat() == catComment)
- // The comment includes a final '\n'
- os << p.get_token().asInput();
- else {
- if (p.next_token().cat() == catNewline)
- p.get_token();
- os << '\n';
- }
- break;
- }
+ p.pushPosition();
+
+ // read the parameters
+ Parser::Arg const params = p.verbatimStuff(">>=\n", false);
+ if (!params.first) {
+ p.popPosition();
+ return false;
}
- newcontext.check_end_layout(os);
+
+ Parser::Arg const code = p.verbatimStuff("\n@");
+ if (!code.first) {
+ p.popPosition();
+ return false;
+ }
+ string const post_chunk = p.verbatimStuff("\n").second + '\n';
+ if (post_chunk[0] != ' ' && post_chunk[0] != '\n') {
+ p.popPosition();
+ return false;
+ }
+ // The last newline read is important for paragraph handling
+ p.putback();
+ p.deparse();
+
+ //cerr << "params=[" << params.second << "], code=[" << code.second << "]" <<endl;
+ // We must have a valid layout before outputting the Chunk inset.
+ context.check_layout(os);
+ Context chunkcontext(true, context.textclass);
+ chunkcontext.layout = &context.textclass.plainLayout();
+ begin_inset(os, "Flex Chunk");
+ os << "\nstatus open\n";
+ if (!params.second.empty()) {
+ chunkcontext.check_layout(os);
+ Context paramscontext(true, context.textclass);
+ paramscontext.layout = &context.textclass.plainLayout();
+ begin_inset(os, "Argument 1");
+ os << "\nstatus open\n";
+ output_ert(os, params.second, paramscontext);
+ end_inset(os);
+ }
+ output_ert(os, code.second, chunkcontext);
+ end_inset(os);
+
+ p.dropPosition();
+ return true;
}
// followed by number?
if (p.next_token().cat() == catOther) {
- char c = p.getChar();
- paramtext += c;
+ string s = p.get_token().asInput();
+ paramtext += s;
// number = current arity + 1?
- if (c == arity + '0' + 1)
+ if (s.size() == 1 && s[0] == arity + '0' + 1)
++arity;
else
simple = false;
os << "\n\\def" << ert;
end_inset(os);
} else
- handle_ert(os, command + ert, context);
+ output_ert_inset(os, command + ert, context);
}
Layout const * newlayout = 0;
InsetLayout const * newinsetlayout = 0;
char const * const * where = 0;
- // Store the latest bibliographystyle and nocite{*} option
- // (needed for bibtex inset)
+ // Store the latest bibliographystyle, addcontentslineContent and
+ // nocite{*} option (needed for bibtex inset)
string btprint;
+ string contentslineContent;
string bibliographystyle = "default";
- bool const use_natbib = preamble.isPackageUsed("natbib");
- bool const use_jurabib = preamble.isPackageUsed("jurabib");
+ bool const use_natbib = isProvided("natbib");
+ bool const use_jurabib = isProvided("jurabib");
string last_env;
+
+ // it is impossible to determine the correct encoding for non-CJK Japanese.
+ // Therefore write a note at the beginning of the document
+ if (is_nonCJKJapanese) {
+ context.check_layout(os);
+ begin_inset(os, "Note Note\n");
+ os << "status open\n\\begin_layout Plain Layout\n"
+ << "\\series bold\n"
+ << "Important information:\n"
+ << "\\end_layout\n\n"
+ << "\\begin_layout Plain Layout\n"
+ << "The original LaTeX source for this document is in Japanese (pLaTeX).\n"
+ << " It was therefore impossible for tex2lyx to determine the correct encoding.\n"
+ << " The iconv encoding " << p.getEncoding() << " was used.\n"
+ << " If this is incorrect, you must run the tex2lyx program on the command line\n"
+ << " and specify the encoding using the -e command-line switch.\n"
+ << " In addition, you might want to double check that the desired output encoding\n"
+ << " is correctly selected in Document > Settings > Language.\n"
+ << "\\end_layout\n";
+ end_inset(os);
+ is_nonCJKJapanese = false;
+ }
+
while (p.good()) {
Token const & t = p.get_token();
-
#ifdef FILEDEBUG
debugToken(cerr, t, flags);
#endif
//
// cat codes
//
+ bool const starred = p.next_token().asInput() == "*";
+ string const starredname(starred ? (t.cs() + '*') : t.cs());
if (t.cat() == catMath) {
// we are inside some text mode thingy, so opening new math is allowed
context.check_layout(os);
skip_braces(p);
}
- else if (t.asInput() == "<" && p.next_token().asInput() == "<") {
- context.check_layout(os);
- begin_inset(os, "Quotes ");
- os << "ard";
- end_inset(os);
- p.get_token();
- skip_braces(p);
- }
-
else if (t.asInput() == "<"
- && p.next_token().asInput() == "<" && noweb_mode) {
- p.get_token();
- parse_noweb(p, os, context);
+ && p.next_token().asInput() == "<") {
+ bool has_chunk = false;
+ if (noweb_mode) {
+ p.pushPosition();
+ p.get_token();
+ has_chunk = parse_chunk(p, os, context);
+ if (!has_chunk)
+ p.popPosition();
+ }
+
+ if (!has_chunk) {
+ context.check_layout(os);
+ begin_inset(os, "Quotes ");
+ //FIXME: this is a right danish quote;
+ // why not a left french quote?
+ os << "ard";
+ end_inset(os);
+ p.get_token();
+ skip_braces(p);
+ }
}
else if (t.cat() == catSpace || (t.cat() == catNewline && ! p.isParagraph()))
else
cerr << "Warning: Inserting missing ']' in '"
<< s << "'." << endl;
- handle_ert(os, s, context);
+ output_ert_inset(os, s, context);
}
else if (t.cat() == catLetter) {
context.check_layout(os);
- // Workaround for bug 4752.
- // FIXME: This whole code block needs to be removed
- // when the bug is fixed and tex2lyx produces
- // the updated file format.
- // The replacement algorithm in LyX is so stupid that
- // it even translates a phrase if it is part of a word.
- bool handled = false;
- for (int const * l = known_phrase_lengths; *l; ++l) {
- string phrase = t.cs();
- for (int i = 1; i < *l && p.next_token().isAlnumASCII(); ++i)
- phrase += p.get_token().cs();
- if (is_known(phrase, known_coded_phrases)) {
- handle_ert(os, phrase, context);
- handled = true;
- break;
- } else {
- for (size_t i = 1; i < phrase.length(); ++i)
- p.putback();
- }
- }
- if (!handled)
- os << t.cs();
+ os << t.cs();
}
else if (t.cat() == catOther ||
t.cat() == catAlign ||
t.cat() == catParameter) {
- // This translates "&" to "\\&" which may be wrong...
context.check_layout(os);
- os << t.cs();
+ if (t.asInput() == "-" && p.next_token().asInput() == "-" &&
+ context.merging_hyphens_allowed &&
+ context.font.family != "ttfamily" &&
+ !context.layout->pass_thru) {
+ if (p.next_next_token().asInput() == "-") {
+ // --- is emdash
+ os << to_utf8(docstring(1, 0x2014));
+ p.get_token();
+ } else
+ // -- is endash
+ os << to_utf8(docstring(1, 0x2013));
+ p.get_token();
+ } else
+ // This translates "&" to "\\&" which may be wrong...
+ os << t.cs();
}
else if (p.isParagraph()) {
if (context.new_layout_allowed)
context.new_paragraph(os);
else
- handle_ert(os, "\\par ", context);
+ output_ert_inset(os, "\\par ", context);
eat_whitespace(p, os, context, true);
}
Token const next = p.next_token();
Token const end = p.next_next_token();
if (next.cat() == catEnd) {
- // {}
- Token const prev = p.prev_token();
- p.get_token();
- if (p.next_token().character() == '`' ||
- (prev.character() == '-' &&
- p.next_token().character() == '-'))
- ; // ignore it in {}`` or -{}-
- else
- handle_ert(os, "{}", context);
+ // {}
+ Token const prev = p.prev_token();
+ p.get_token();
+ if (p.next_token().character() == '`')
+ ; // ignore it in {}``
+ else
+ output_ert_inset(os, "{}", context);
} else if (next.cat() == catEscape &&
is_known(next.cs(), known_quotes) &&
end.cat() == catEnd) {
// braces here for better readability.
parse_text_snippet(p, os, FLAG_BRACE_LAST,
outer, context);
+ } else if (p.next_token().asInput() == "\\ascii") {
+ // handle the \ascii characters
+ // (the case without braces is handled later)
+ // the code is "{\ascii\xxx}"
+ p.get_token(); // eat \ascii
+ string name2 = p.get_token().asInput();
+ p.get_token(); // eat the final '}'
+ string const name = "{\\ascii" + name2 + "}";
+ bool termination;
+ docstring rem;
+ set<string> req;
+ // get the character from unicodesymbols
+ docstring s = encodings.fromLaTeXCommand(from_utf8(name),
+ Encodings::TEXT_CMD, termination, rem, &req);
+ if (!s.empty()) {
+ context.check_layout(os);
+ os << to_utf8(s);
+ if (!rem.empty())
+ output_ert_inset(os,
+ to_utf8(rem), context);
+ for (set<string>::const_iterator it = req.begin();
+ it != req.end(); ++it)
+ preamble.registerAutomaticallyLoadedPackage(*it);
+ } else
+ // we did not find a non-ert version
+ output_ert_inset(os, name, context);
} else {
context.check_layout(os);
// special handling of font attribute changes
p.get_token();
} else {
p.putback();
- handle_ert(os, "{", context);
+ output_ert_inset(os, "{", context);
parse_text_snippet(p, os,
FLAG_BRACE_LAST,
outer, context);
- handle_ert(os, "}", context);
+ output_ert_inset(os, "}", context);
}
} else if (! context.new_layout_allowed) {
- handle_ert(os, "{", context);
+ output_ert_inset(os, "{", context);
parse_text_snippet(p, os, FLAG_BRACE_LAST,
outer, context);
- handle_ert(os, "}", context);
+ output_ert_inset(os, "}", context);
} else if (is_known(next.cs(), known_sizes)) {
// next will change the size, so we must
// reset it here
<< "\n\\shape "
<< context.font.shape << "\n";
} else {
- handle_ert(os, "{", context);
+ output_ert_inset(os, "{", context);
parse_text_snippet(p, os, FLAG_BRACE_LAST,
outer, context);
- handle_ert(os, "}", context);
+ output_ert_inset(os, "}", context);
}
}
}
return;
}
cerr << "stray '}' in text\n";
- handle_ert(os, "}", context);
+ output_ert_inset(os, "}", context);
}
else if (t.cat() == catComment)
// control sequences
//
- else if (t.cs() == "(") {
- context.check_layout(os);
- begin_inset(os, "Formula");
- os << " \\(";
- parse_math(p, os, FLAG_SIMPLE2, MATH_MODE);
- os << "\\)";
- end_inset(os);
- }
-
- else if (t.cs() == "[") {
+ else if (t.cs() == "(" || t.cs() == "[") {
+ bool const simple = t.cs() == "(";
context.check_layout(os);
begin_inset(os, "Formula");
- os << " \\[";
- parse_math(p, os, FLAG_EQUATION, MATH_MODE);
- os << "\\]";
+ os << " \\" << t.cs();
+ parse_math(p, os, simple ? FLAG_SIMPLE2 : FLAG_EQUATION, MATH_MODE);
+ os << '\\' << (simple ? ')' : ']');
end_inset(os);
- // Prevent the conversion of a line break to a space
- // (bug 7668). This does not change the output, but
- // looks ugly in LyX.
- eat_whitespace(p, os, context, false);
+ if (!simple) {
+ // Prevent the conversion of a line break to a
+ // space (bug 7668). This does not change the
+ // output, but looks ugly in LyX.
+ eat_whitespace(p, os, context, false);
+ }
}
else if (t.cs() == "begin")
// FIXME: Do this in check_layout()!
context.has_item = false;
if (optarg)
- handle_ert(os, "\\item", context);
+ output_ert_inset(os, "\\item", context);
else
- handle_ert(os, "\\item ", context);
+ output_ert_inset(os, "\\item ", context);
}
if (optarg) {
if (context.layout->labeltype != LABEL_MANUAL) {
- // LyX does not support \item[\mybullet]
- // in itemize environments
+ // handle the optional argument of an \item in an itemize environment
+ begin_inset(os, "Argument item:1\n");
+ os << "status open\n";
+ os << "\n\\begin_layout Plain Layout\n";
Parser p2(s + ']');
os << parse_text_snippet(p2,
FLAG_BRACK_LAST, outer, context);
+ // we must not use context.check_end_layout(os)
+ // because that would close the outer itemize layout
+ os << "\n\\end_layout\n";
+ end_inset(os);
+ eat_whitespace(p, os, context, false);
} else if (!s.empty()) {
// LyX adds braces around the argument,
// so we need to remove them here.
// would misinterpret the space as
// item delimiter (bug 7663)
if (contains(s, ' ')) {
- handle_ert(os, s, context);
+ output_ert_inset(os, s, context);
} else {
Parser p2(s + ']');
os << parse_text_snippet(p2,
- FLAG_BRACK_LAST,
- outer, context);
+ FLAG_BRACK_LAST, outer, context);
}
// The space is needed to separate the
// item from the rest of the sentence.
string key = convert_command_inset_arg(p.verbatim_item());
if (contains(label, '\\') || contains(key, '\\')) {
// LyX can't handle LaTeX commands in labels or keys
- handle_ert(os, t.asInput() + '[' + label +
+ output_ert_inset(os, t.asInput() + '[' + label +
"]{" + p.verbatim_item() + '}',
context);
} else {
skip_braces(p);
p.get_token();
string name = normalize_filename(p.verbatim_item());
- string const path = getMasterFilePath();
+ string const path = getMasterFilePath(true);
// We want to preserve relative / absolute filenames,
// therefore path is only used for testing
// The file extension is in every case ".tex".
if (!Gnumeric_name.empty())
name = Gnumeric_name;
}
- if (makeAbsPath(name, path).exists())
- fix_relative_filename(name);
- else
+ FileName const absname = makeAbsPath(name, path);
+ if (absname.exists()) {
+ fix_child_filename(name);
+ copy_file(absname, name);
+ } else
cerr << "Warning: Could not find file '"
<< name << "'." << endl;
context.check_layout(os);
end_inset(os);
context.check_layout(os);
macro = false;
- // register the packages that are automatically reloaded
+ // register the packages that are automatically loaded
// by the Gnumeric template
registerExternalTemplatePackages("GnumericSpreadsheet");
}
context.check_layout(os);
// FIXME: This is a hack to prevent paragraph
// deletion if it is empty. Handle this better!
- handle_comment(os,
- "%dummy comment inserted by tex2lyx to "
+ output_comment(p, os,
+ "dummy comment inserted by tex2lyx to "
"ensure that this paragraph is not empty",
context);
// Both measures above may generate an additional
for (; it != en; ++it)
preamble.registerAutomaticallyLoadedPackage(*it);
} else
- handle_ert(os,
+ output_ert_inset(os,
"\\date{" + p.verbatim_item() + '}',
context);
}
preamble.registerAutomaticallyLoadedPackage(*it);
}
- else if (t.cs() == "caption") {
- p.skip_spaces();
- context.check_layout(os);
- p.skip_spaces();
- begin_inset(os, "Caption\n");
- Context newcontext(true, context.textclass);
- newcontext.font = context.font;
- newcontext.check_layout(os);
- if (p.next_token().cat() != catEscape &&
- p.next_token().character() == '[') {
- p.get_token(); // eat '['
- begin_inset(os, "Argument\n");
- os << "status collapsed\n";
- parse_text_in_inset(p, os, FLAG_BRACK_LAST, outer, context);
- end_inset(os);
- eat_whitespace(p, os, context, false);
- }
- parse_text(p, os, FLAG_ITEM, outer, context);
- context.check_end_layout(os);
- // We don't need really a new paragraph, but
- // we must make sure that the next item gets a \begin_layout.
- context.new_paragraph(os);
- end_inset(os);
- p.skip_spaces();
- newcontext.check_end_layout(os);
- }
-
else if (t.cs() == "subfloat") {
- // the syntax is \subfloat[caption]{content}
+ // the syntax is \subfloat[list entry][sub caption]{content}
// if it is a table of figure depends on the surrounding float
- bool has_caption = false;
+ // FIXME: second optional argument is not parsed
p.skip_spaces();
// do nothing if there is no outer float
if (!float_type.empty()) {
<< "\nstatus collapsed\n\n";
// test for caption
string caption;
+ bool has_caption = false;
if (p.next_token().cat() != catEscape &&
p.next_token().character() == '[') {
p.get_token(); // eat '['
// we must make sure that the caption gets a \begin_layout
os << "\n\\begin_layout Plain Layout";
p.skip_spaces();
- begin_inset(os, "Caption\n");
- Context newcontext(true, context.textclass);
- newcontext.font = context.font;
+ begin_inset(os, "Caption Standard\n");
+ Context newcontext(true, context.textclass,
+ 0, 0, context.font);
newcontext.check_layout(os);
os << caption << "\n";
newcontext.check_end_layout(os);
// output it as ERT
if (p.hasOpt()) {
string opt_arg = convert_command_inset_arg(p.getArg('[', ']'));
- handle_ert(os, t.asInput() + '[' + opt_arg +
+ output_ert_inset(os, t.asInput() + '[' + opt_arg +
"]{" + p.verbatim_item() + '}', context);
} else
- handle_ert(os, t.asInput() + "{" + p.verbatim_item() + '}', context);
+ output_ert_inset(os, t.asInput() + "{" + p.verbatim_item() + '}', context);
}
}
opts["clip"] = string();
string name = normalize_filename(p.verbatim_item());
- string const path = getMasterFilePath();
+ string const path = getMasterFilePath(true);
// We want to preserve relative / absolute filenames,
// therefore path is only used for testing
if (!makeAbsPath(name, path).exists()) {
}
}
- if (makeAbsPath(name, path).exists())
- fix_relative_filename(name);
- else
+ FileName const absname = makeAbsPath(name, path);
+ if (absname.exists()) {
+ fix_child_filename(name);
+ copy_file(absname, name);
+ } else
cerr << "Warning: Could not find graphics file '"
<< name << "'." << endl;
if (s == "\xb1" || s == "\xb3" || s == "\xb2" || s == "\xb5")
os << s;
else
- handle_ert(os, "\\ensuremath{" + s + "}",
+ output_ert_inset(os, "\\ensuremath{" + s + "}",
context);
}
// swallow this
skip_spaces_braces(p);
} else
- handle_ert(os, t.asInput(), context);
+ output_ert_inset(os, t.asInput(), context);
}
else if (t.cs() == "tableofcontents" || t.cs() == "lstlistoflistings") {
preamble.registerAutomaticallyLoadedPackage("listings");
}
- else if (t.cs() == "listoffigures") {
- context.check_layout(os);
- begin_inset(os, "FloatList figure\n");
- end_inset(os);
- skip_spaces_braces(p);
- }
-
- else if (t.cs() == "listoftables") {
+ else if (t.cs() == "listoffigures" || t.cs() == "listoftables") {
context.check_layout(os);
- begin_inset(os, "FloatList table\n");
+ if (t.cs() == "listoffigures")
+ begin_inset(os, "FloatList figure\n");
+ else
+ begin_inset(os, "FloatList table\n");
end_inset(os);
skip_spaces_braces(p);
}
end_inset(os);
p.get_token(); // swallow second arg
} else
- handle_ert(os, "\\listof{" + name + "}", context);
+ output_ert_inset(os, "\\listof{" + name + "}", context);
}
else if ((where = is_known(t.cs(), known_text_font_families)))
else if (t.cs() == "textcolor") {
// scheme is \textcolor{color name}{text}
string const color = p.verbatim_item();
- // we only support the predefined colors of the color package
+ // we support the predefined colors of the color and xcolor packages
if (color == "black" || color == "blue" || color == "cyan"
|| color == "green" || color == "magenta" || color == "red"
|| color == "white" || color == "yellow") {
context.check_layout(os);
os << "\n\\color inherit\n";
preamble.registerAutomaticallyLoadedPackage("color");
+ } else if (color == "brown" || color == "darkgray" || color == "gray"
+ || color == "lightgray" || color == "lime" || color == "olive"
+ || color == "orange" || color == "pink" || color == "purple"
+ || color == "teal" || color == "violet") {
+ context.check_layout(os);
+ os << "\n\\color " << color << "\n";
+ parse_text_snippet(p, os, FLAG_ITEM, outer, context);
+ context.check_layout(os);
+ os << "\n\\color inherit\n";
+ preamble.registerAutomaticallyLoadedPackage("xcolor");
} else
// for custom defined colors
- handle_ert(os, t.asInput() + "{" + color + "}", context);
+ output_ert_inset(os, t.asInput() + "{" + color + "}", context);
}
else if (t.cs() == "underbar" || t.cs() == "uline") {
string localtime = p.getArg('{', '}');
preamble.registerAuthor(name);
Author const & author = preamble.getAuthor(name);
- // from_ctime() will fail if LyX decides to output the
- // time in the text language. It might also use a wrong
- // time zone (if the original LyX document was exported
- // with a different time zone).
- time_t ptime = from_ctime(localtime);
+ // from_asctime_utc() will fail if LyX decides to output the
+ // time in the text language.
+ time_t ptime = from_asctime_utc(localtime);
if (ptime == static_cast<time_t>(-1)) {
cerr << "Warning: Could not parse time `" << localtime
<< "´ for change tracking, using current time instead.\n";
}
}
+ else if (t.cs() == "textipa") {
+ context.check_layout(os);
+ begin_inset(os, "IPA\n");
+ bool merging_hyphens_allowed = context.merging_hyphens_allowed;
+ context.merging_hyphens_allowed = false;
+ parse_text_in_inset(p, os, FLAG_ITEM, outer, context);
+ context.merging_hyphens_allowed = merging_hyphens_allowed;
+ end_inset(os);
+ preamble.registerAutomaticallyLoadedPackage("tipa");
+ preamble.registerAutomaticallyLoadedPackage("tipx");
+ }
+
+ else if (t.cs() == "texttoptiebar" || t.cs() == "textbottomtiebar") {
+ context.check_layout(os);
+ begin_inset(os, "IPADeco " + t.cs().substr(4) + "\n");
+ os << "status open\n";
+ parse_text_in_inset(p, os, FLAG_ITEM, outer, context);
+ end_inset(os);
+ p.skip_spaces();
+ }
+
+ else if (t.cs() == "textvertline") {
+ // FIXME: This is not correct, \textvertline is higher than |
+ os << "|";
+ skip_braces(p);
+ continue;
+ }
+
+ else if (t.cs() == "tone" ) {
+ context.check_layout(os);
+ // register the tone package
+ preamble.registerAutomaticallyLoadedPackage("tone");
+ string content = trimSpaceAndEol(p.verbatim_item());
+ string command = t.asInput() + "{" + content + "}";
+ // some tones can be detected by unicodesymbols, some need special code
+ if (is_known(content, known_tones)) {
+ os << "\\IPAChar " << command << "\n";
+ continue;
+ }
+ // try to see whether the string is in unicodesymbols
+ bool termination;
+ docstring rem;
+ set<string> req;
+ docstring s = encodings.fromLaTeXCommand(from_utf8(command),
+ Encodings::TEXT_CMD | Encodings::MATH_CMD,
+ termination, rem, &req);
+ if (!s.empty()) {
+ os << to_utf8(s);
+ if (!rem.empty())
+ output_ert_inset(os, to_utf8(rem), context);
+ for (set<string>::const_iterator it = req.begin();
+ it != req.end(); ++it)
+ preamble.registerAutomaticallyLoadedPackage(*it);
+ } else
+ // we did not find a non-ert version
+ output_ert_inset(os, command, context);
+ }
+
else if (t.cs() == "phantom" || t.cs() == "hphantom" ||
t.cs() == "vphantom") {
context.check_layout(os);
else if (t.cs() == "href") {
context.check_layout(os);
- string target = p.getArg('{', '}');
- string name = p.getArg('{', '}');
+ string target = convert_command_inset_arg(p.verbatim_item());
+ string name = convert_command_inset_arg(p.verbatim_item());
string type;
size_t i = target.find(':');
if (i != string::npos) {
end_inset(os);
}
- else if (is_known(t.cs(), known_phrases) ||
- (t.cs() == "protect" &&
- p.next_token().cat() == catEscape &&
- is_known(p.next_token().cs(), known_phrases))) {
- // LyX sometimes puts a \protect in front, so we have to ignore it
- // FIXME: This needs to be changed when bug 4752 is fixed.
- where = is_known(
- t.cs() == "protect" ? p.get_token().cs() : t.cs(),
- known_phrases);
+ // handle refstyle first to catch \eqref which can also occur
+ // without refstyle. Only recognize these commands if
+ // refstyle.sty was found in the preamble (otherwise \eqref
+ // and user defined ref commands could be misdetected).
+ else if ((where = is_known(t.cs(), known_refstyle_commands)) &&
+ preamble.refstyle()) {
context.check_layout(os);
- os << known_coded_phrases[where - known_phrases];
- skip_spaces_braces(p);
+ begin_command_inset(os, "ref", "formatted");
+ os << "reference \"";
+ os << known_refstyle_prefixes[where - known_refstyle_commands]
+ << ":";
+ os << convert_command_inset_arg(p.verbatim_item())
+ << "\"\n";
+ end_inset(os);
+ preamble.registerAutomaticallyLoadedPackage("refstyle");
}
- else if ((where = is_known(t.cs(), known_ref_commands))) {
+ // if refstyle is used, we must not convert \prettyref to a
+ // formatted reference, since that would result in a refstyle command.
+ else if ((where = is_known(t.cs(), known_ref_commands)) &&
+ (t.cs() != "prettyref" || !preamble.refstyle())) {
string const opt = p.getOpt();
if (opt.empty()) {
context.check_layout(os);
end_inset(os);
if (t.cs() == "vref" || t.cs() == "vpageref")
preamble.registerAutomaticallyLoadedPackage("varioref");
-
+ else if (t.cs() == "prettyref")
+ preamble.registerAutomaticallyLoadedPackage("prettyref");
} else {
- // LyX does not support optional arguments of ref commands
- handle_ert(os, t.asInput() + '[' + opt + "]{" +
- p.verbatim_item() + "}", context);
+ // LyX does not yet support optional arguments of ref commands
+ output_ert_inset(os, t.asInput() + '[' + opt + "]{" +
+ p.verbatim_item() + '}', context);
}
}
<< convert_command_inset_arg(p.verbatim_item())
<< "\"\n";
end_inset(os);
+ // Need to set the cite engine if natbib is loaded by
+ // the document class directly
+ if (preamble.citeEngine() == "basic")
+ preamble.citeEngine("natbib");
}
else if (use_jurabib &&
os << "before " << '"' << before << '"' << "\n";
os << "key " << '"' << citation << '"' << "\n";
end_inset(os);
+ // Need to set the cite engine if jurabib is loaded by
+ // the document class directly
+ if (preamble.citeEngine() == "basic")
+ preamble.citeEngine("jurabib");
}
else if (t.cs() == "cite"
end_inset(os);
}
- else if (t.cs() == "printindex") {
+ else if (t.cs() == "printindex" || t.cs() == "printsubindex") {
context.check_layout(os);
- begin_command_inset(os, "index_print", "printindex");
- os << "type \"idx\"\n";
+ string commandname = t.cs();
+ bool star = false;
+ if (p.next_token().asInput() == "*") {
+ commandname += "*";
+ star = true;
+ p.get_token();
+ }
+ begin_command_inset(os, "index_print", commandname);
+ string const indexname = p.getArg('[', ']');
+ if (!star) {
+ if (indexname.empty())
+ os << "type \"idx\"\n";
+ else
+ os << "type \"" << indexname << "\"\n";
+ }
end_inset(os);
skip_spaces_braces(p);
preamble.registerAutomaticallyLoadedPackage("makeidx");
context.check_layout(os);
begin_inset(os, "script ");
os << t.cs().substr(4) << '\n';
- parse_text_in_inset(p, os, FLAG_ITEM, false, context);
+ newinsetlayout = findInsetLayout(context.textclass, t.cs(), true);
+ parse_text_in_inset(p, os, FLAG_ITEM, false, context, newinsetlayout);
end_inset(os);
if (t.cs() == "textsubscript")
preamble.registerAutomaticallyLoadedPackage("subscript");
context.check_layout(os);
// save the language for the case that a
// \foreignlanguage is used
-
context.font.language = babel2lyx(p.verbatim_item());
os << "\n\\lang " << context.font.language << "\n";
}
context.font.language, lang);
}
+ else if (prefixIs(t.cs(), "text") && preamble.usePolyglossia()
+ && is_known(t.cs().substr(4), preamble.polyglossia_languages)) {
+ // scheme is \textLANGUAGE{text} where LANGUAGE is in polyglossia_languages[]
+ string lang;
+ // We have to output the whole command if it has an option
+ // because LyX doesn't support this yet, see bug #8214,
+ // only if there is a single option specifying a variant, we can handle it.
+ if (p.hasOpt()) {
+ string langopts = p.getOpt();
+ // check if the option contains a variant, if yes, extract it
+ string::size_type pos_var = langopts.find("variant");
+ string::size_type i = langopts.find(',');
+ string::size_type k = langopts.find('=', pos_var);
+ if (pos_var != string::npos && i == string::npos) {
+ string variant;
+ variant = langopts.substr(k + 1, langopts.length() - k - 2);
+ lang = preamble.polyglossia2lyx(variant);
+ parse_text_attributes(p, os, FLAG_ITEM, outer,
+ context, "\\lang",
+ context.font.language, lang);
+ } else
+ output_ert_inset(os, t.asInput() + langopts, context);
+ } else {
+ lang = preamble.polyglossia2lyx(t.cs().substr(4, string::npos));
+ parse_text_attributes(p, os, FLAG_ITEM, outer,
+ context, "\\lang",
+ context.font.language, lang);
+ }
+ }
+
else if (t.cs() == "inputencoding") {
// nothing to write here
string const enc = subst(p.verbatim_item(), "\n", " ");
- p.setEncoding(enc);
+ p.setEncoding(enc, Encoding::inputenc);
}
- else if ((where = is_known(t.cs(), known_special_chars))) {
+ else if (is_known(t.cs(), known_special_chars) ||
+ (t.cs() == "protect" &&
+ p.next_token().cat() == catEscape &&
+ is_known(p.next_token().cs(), known_special_protect_chars))) {
+ // LyX sometimes puts a \protect in front, so we have to ignore it
+ where = is_known(
+ t.cs() == "protect" ? p.get_token().cs() : t.cs(),
+ known_special_chars);
context.check_layout(os);
- os << "\\SpecialChar \\"
- << known_coded_special_chars[where - known_special_chars]
- << '\n';
+ os << known_coded_special_chars[where - known_special_chars];
skip_spaces_braces(p);
}
- else if (t.cs() == "nobreakdash" && p.next_token().asInput() == "-") {
+ else if ((t.cs() == "nobreakdash" && p.next_token().asInput() == "-") ||
+ (t.cs() == "protect" && p.next_token().asInput() == "\\nobreakdash" &&
+ p.next_next_token().asInput() == "-") ||
+ (t.cs() == "@" && p.next_token().asInput() == ".")) {
+ // LyX sometimes puts a \protect in front, so we have to ignore it
+ if (t.cs() == "protect")
+ p.get_token();
context.check_layout(os);
- os << "\\SpecialChar \\nobreakdash-\n";
+ if (t.cs() == "nobreakdash")
+ os << "\\SpecialChar nobreakdash\n";
+ else
+ os << "\\SpecialChar endofsentence\n";
p.get_token();
}
skip_braces(p);
}
- else if (t.cs() == "@" && p.next_token().asInput() == ".") {
- context.check_layout(os);
- os << "\\SpecialChar \\@.\n";
- p.get_token();
- }
-
- else if (t.cs() == "-") {
- context.check_layout(os);
- os << "\\SpecialChar \\-\n";
- }
-
- else if (t.cs() == "textasciitilde") {
- context.check_layout(os);
- os << '~';
- skip_spaces_braces(p);
- }
-
- else if (t.cs() == "textasciicircum") {
- context.check_layout(os);
- os << '^';
- skip_spaces_braces(p);
- }
-
- else if (t.cs() == "textbackslash") {
- context.check_layout(os);
- os << "\n\\backslash\n";
- skip_spaces_braces(p);
- }
-
else if (t.cs() == "_" || t.cs() == "&" || t.cs() == "#"
|| t.cs() == "$" || t.cs() == "{" || t.cs() == "}"
- || t.cs() == "%") {
+ || t.cs() == "%" || t.cs() == "-") {
context.check_layout(os);
- os << t.cs();
+ if (t.cs() == "-")
+ os << "\\SpecialChar softhyphen\n";
+ else
+ os << t.cs();
}
else if (t.cs() == "char") {
os << '"';
skip_braces(p);
} else {
- handle_ert(os, "\\char`", context);
+ output_ert_inset(os, "\\char`", context);
}
} else {
- handle_ert(os, "\\char", context);
+ output_ert_inset(os, "\\char", context);
}
}
else if (t.cs() == "verb") {
context.check_layout(os);
- char const delimiter = p.next_token().character();
- string const arg = p.getArg(delimiter, delimiter);
- ostringstream oss;
- oss << "\\verb" << delimiter << arg << delimiter;
- handle_ert(os, oss.str(), context);
+ // set catcodes to verbatim early, just in case.
+ p.setCatcodes(VERBATIM_CATCODES);
+ string delim = p.get_token().asInput();
+ Parser::Arg arg = p.verbatimStuff(delim);
+ if (arg.first)
+ output_ert_inset(os, "\\verb" + delim
+ + arg.second + delim, context);
+ else
+ cerr << "invalid \\verb command. Skipping" << endl;
}
// Problem: \= creates a tabstop inside the tabbing environment
// and else an accent. In the latter case we really would want
// \={o} instead of \= o.
else if (t.cs() == "=" && (flags & FLAG_TABBING))
- handle_ert(os, t.asInput(), context);
-
- // accents (see Table 6 in Comprehensive LaTeX Symbol List)
- else if (t.cs().size() == 1
- && contains("\"'.=^`bcdHkrtuv~", t.cs())) {
- context.check_layout(os);
- // try to see whether the string is in unicodesymbols
- bool termination;
- docstring rem;
- string command = t.asInput() + "{"
- + trimSpaceAndEol(p.verbatim_item())
- + "}";
- set<string> req;
- docstring s = encodings.fromLaTeXCommand(from_utf8(command),
- Encodings::TEXT_CMD | Encodings::MATH_CMD,
- termination, rem, &req);
- if (!s.empty()) {
- if (!rem.empty())
- cerr << "When parsing " << command
- << ", result is " << to_utf8(s)
- << "+" << to_utf8(rem) << endl;
- os << to_utf8(s);
- for (set<string>::const_iterator it = req.begin(); it != req.end(); ++it)
- preamble.registerAutomaticallyLoadedPackage(*it);
- } else
- // we did not find a non-ert version
- handle_ert(os, command, context);
- }
+ output_ert_inset(os, t.asInput(), context);
else if (t.cs() == "\\") {
context.check_layout(os);
if (p.hasOpt())
- handle_ert(os, "\\\\" + p.getOpt(), context);
+ output_ert_inset(os, "\\\\" + p.getOpt(), context);
else if (p.next_token().asInput() == "*") {
p.get_token();
// getOpt() eats the following space if there
// is no optional argument, but that is OK
// here since it has no effect in the output.
- handle_ert(os, "\\\\*" + p.getOpt(), context);
+ output_ert_inset(os, "\\\\*" + p.getOpt(), context);
}
else {
begin_inset(os, "Newline newline");
name += p.get_token().asInput();
context.check_layout(os);
string filename(normalize_filename(p.getArg('{', '}')));
- string const path = getMasterFilePath();
+ string const path = getMasterFilePath(true);
// We want to preserve relative / absolute filenames,
// therefore path is only used for testing
if ((t.cs() == "include" || t.cs() == "input") &&
if (makeAbsPath(filename, path).exists()) {
string const abstexname =
makeAbsPath(filename, path).absFileName();
- string const abslyxname =
- changeExtension(abstexname, ".lyx");
string const absfigname =
changeExtension(abstexname, ".fig");
- fix_relative_filename(filename);
- string const lyxname =
- changeExtension(filename, ".lyx");
+ fix_child_filename(filename);
+ string const lyxname = changeExtension(filename,
+ roundtripMode() ? ".lyx.lyx" : ".lyx");
+ string const abslyxname = makeAbsPath(
+ lyxname, getParentFilePath(false)).absFileName();
bool xfig = false;
- external = FileName(absfigname).exists();
- if (t.cs() == "input") {
+ if (!skipChildren())
+ external = FileName(absfigname).exists();
+ if (t.cs() == "input" && !skipChildren()) {
string const ext = getExtension(abstexname);
// Combined PS/LaTeX:
}
if (external) {
outname = changeExtension(filename, ".fig");
+ FileName abssrc(changeExtension(abstexname, ".fig"));
+ copy_file(abssrc, outname);
} else if (xfig) {
// Don't try to convert, the result
// would be full of ERT.
outname = filename;
+ FileName abssrc(abstexname);
+ copy_file(abssrc, outname);
} else if (t.cs() != "verbatiminput" &&
+ !skipChildren() &&
tex2lyx(abstexname, FileName(abslyxname),
p.getEncoding())) {
outname = lyxname;
+ // no need to call copy_file
+ // tex2lyx creates the file
} else {
outname = filename;
+ FileName abssrc(abstexname);
+ copy_file(abssrc, outname);
}
} else {
cerr << "Warning: Could not find included file '"
registerExternalTemplatePackages("XFig");
} else {
begin_command_inset(os, "include", name);
+ outname = subst(outname, "\"", "\\\"");
os << "preview false\n"
"filename \"" << outname << "\"\n";
if (t.cs() == "verbatiminput")
else if (t.cs() == "bibliographystyle") {
// store new bibliographystyle
bibliographystyle = p.verbatim_item();
- // If any other command than \bibliography and
- // \nocite{*} follows, we need to output the style
+ // If any command other than \bibliography, \addcontentsline
+ // and \nocite{*} follows, we need to output the style
// (because it might be used by that command).
// Otherwise, it will automatically be output by LyX.
p.pushPosition();
continue;
} else if (t2.cs() == "bibliography")
output = false;
+ else if (t2.cs() == "phantomsection") {
+ output = false;
+ continue;
+ }
+ else if (t2.cs() == "addcontentsline") {
+ // get the 3 arguments of \addcontentsline
+ p.getArg('{', '}');
+ p.getArg('{', '}');
+ contentslineContent = p.getArg('{', '}');
+ // if the last argument is not \refname we must output
+ if (contentslineContent == "\\refname")
+ output = false;
+ }
break;
}
p.popPosition();
if (output) {
- handle_ert(os,
+ output_ert_inset(os,
"\\bibliographystyle{" + bibliographystyle + '}',
context);
}
}
+ else if (t.cs() == "phantomsection") {
+ // we only support this if it occurs between
+ // \bibliographystyle and \bibliography
+ if (bibliographystyle.empty())
+ output_ert_inset(os, "\\phantomsection", context);
+ }
+
+ else if (t.cs() == "addcontentsline") {
+ context.check_layout(os);
+ // get the 3 arguments of \addcontentsline
+ string const one = p.getArg('{', '}');
+ string const two = p.getArg('{', '}');
+ string const three = p.getArg('{', '}');
+ // only if it is a \refname, we support it for the bibtex inset
+ if (contentslineContent != "\\refname") {
+ output_ert_inset(os,
+ "\\addcontentsline{" + one + "}{" + two + "}{"+ three + '}',
+ context);
+ }
+ }
+
else if (t.cs() == "bibliography") {
context.check_layout(os);
+ string BibOpts;
begin_command_inset(os, "bibtex", "bibtex");
if (!btprint.empty()) {
os << "btprint " << '"' << "btPrintAll" << '"' << "\n";
btprint.clear();
}
os << "bibfiles " << '"' << p.verbatim_item() << '"' << "\n";
+ // Do we have addcontentsline?
+ if (contentslineContent == "\\refname") {
+ BibOpts = "bibtotoc";
+ // clear string because next BibTeX inset can be without addcontentsline
+ contentslineContent.clear();
+ }
// Do we have a bibliographystyle set?
- if (!bibliographystyle.empty())
- os << "options " << '"' << bibliographystyle << '"' << "\n";
+ if (!bibliographystyle.empty()) {
+ if (BibOpts.empty())
+ BibOpts = bibliographystyle;
+ else
+ BibOpts = BibOpts + ',' + bibliographystyle;
+ // clear it because each bibtex entry has its style
+ // and we need an empty string to handle \phantomsection
+ bibliographystyle.clear();
+ }
+ os << "options " << '"' << BibOpts << '"' << "\n";
end_inset(os);
}
context, "parbox", "shaded");
} else
parse_box(p, os, 0, FLAG_ITEM, outer, context,
- "", "", t.cs());
+ "", "", t.cs(), "", "");
}
- else if (t.cs() == "ovalbox" || t.cs() == "Ovalbox" ||
+ else if (t.cs() == "fbox" || t.cs() == "mbox" ||
+ t.cs() == "ovalbox" || t.cs() == "Ovalbox" ||
t.cs() == "shadowbox" || t.cs() == "doublebox")
parse_outer_box(p, os, FLAG_ITEM, outer, context, t.cs(), "");
+ else if (t.cs() == "fcolorbox" || t.cs() == "colorbox") {
+ string backgroundcolor;
+ preamble.registerAutomaticallyLoadedPackage("xcolor");
+ if (t.cs() == "fcolorbox") {
+ string const framecolor = p.getArg('{', '}');
+ backgroundcolor = p.getArg('{', '}');
+ parse_box(p, os, 0, 0, outer, context, "", "", "", framecolor, backgroundcolor);
+ } else {
+ backgroundcolor = p.getArg('{', '}');
+ parse_box(p, os, 0, 0, outer, context, "", "", "", "", backgroundcolor);
+ }
+ }
+
+ // FIXME: due to the compiler limit of "if" nestings
+ // the code for the alignment was put here
+ // put them in their own "if" once this limit is fixed
+ else if (t.cs() == "fboxrule" || t.cs() == "fboxsep"
+ || t.cs() == "shadowsize"
+ || t.cs() == "raggedleft" || t.cs() == "centering"
+ || t.cs() == "raggedright") {
+ if (t.cs() == "fboxrule")
+ fboxrule = "";
+ if (t.cs() == "fboxsep")
+ fboxsep = "";
+ if (t.cs() == "shadowsize")
+ shadow_size = "";
+ if (t.cs() != "raggedleft" && t.cs() != "centering"
+ && t.cs() != "raggedright") {
+ p.skip_spaces(true);
+ while (p.good() && p.next_token().cat() != catSpace
+ && p.next_token().cat() != catNewline
+ && p.next_token().cat() != catEscape) {
+ if (t.cs() == "fboxrule")
+ fboxrule = fboxrule + p.get_token().asInput();
+ if (t.cs() == "fboxsep")
+ fboxsep = fboxsep + p.get_token().asInput();
+ if (t.cs() == "shadowsize")
+ shadow_size = shadow_size + p.get_token().asInput();
+ }
+ } else {
+ output_ert_inset(os, t.asInput(), context);
+ }
+ }
+
+ // \framebox() is part of the picture environment and different from \framebox{}
+ // \framebox{} will be parsed by parse_outer_box
else if (t.cs() == "framebox") {
if (p.next_token().character() == '(') {
//the syntax is: \framebox(x,y)[position]{content}
arg += p.getFullParentheseArg();
arg += p.getFullOpt();
eat_whitespace(p, os, context, false);
- handle_ert(os, arg + '{', context);
- eat_whitespace(p, os, context, false);
+ output_ert_inset(os, arg + '{', context);
parse_text(p, os, FLAG_ITEM, outer, context);
- handle_ert(os, "}", context);
+ output_ert_inset(os, "}", context);
} else {
+ //the syntax is: \framebox[width][position]{content}
string special = p.getFullOpt();
special += p.getOpt();
parse_outer_box(p, os, FLAG_ITEM, outer,
- context, t.cs(), special);
+ context, t.cs(), special);
}
}
arg += p.getFullParentheseArg();
arg += p.getFullOpt();
eat_whitespace(p, os, context, false);
- handle_ert(os, arg + '{', context);
- eat_whitespace(p, os, context, false);
+ output_ert_inset(os, arg + '{', context);
parse_text(p, os, FLAG_ITEM, outer, context);
- handle_ert(os, "}", context);
+ output_ert_inset(os, "}", context);
} else
//the syntax is: \makebox[width][position]{content}
parse_box(p, os, 0, FLAG_ITEM, outer, context,
- "", "", t.cs());
+ "", "", t.cs(), "", "");
}
else if (t.cs() == "smallskip" ||
t.cs() == "providecommand" ||
t.cs() == "providecommandx" ||
name[name.length()-1] == '*')
- handle_ert(os, ert, context);
+ output_ert_inset(os, ert, context);
else {
context.check_layout(os);
begin_inset(os, "FormulaMacro");
CommandMap::iterator it = known_commands.find(command);
if (it != known_commands.end())
known_commands[t.asInput()] = it->second;
- handle_ert(os, ert, context);
+ output_ert_inset(os, ert, context);
}
else if (t.cs() == "hspace" || t.cs() == "vspace") {
- bool starred = false;
- if (p.next_token().asInput() == "*") {
+ if (starred)
p.get_token();
- starred = true;
- }
string name = t.asInput();
string const length = p.verbatim_item();
string unit;
case Length::MU:
known_unit = true;
break;
- default:
+ default: {
+ // unitFromString(unit) fails for relative units like Length::PCW,
+ // therefore handle them separately
+ if (unit == "\\paperwidth" || unit == "\\columnwidth"
+ || unit == "\\textwidth" || unit == "\\linewidth"
+ || unit == "\\textheight" || unit == "\\paperheight")
+ known_unit = true;
break;
+ }
}
}
}
- if (t.cs()[0] == 'h' && (known_unit || known_hspace)) {
+ // check for glue lengths
+ bool is_gluelength = false;
+ string gluelength = length;
+ string::size_type i = length.find(" minus");
+ if (i == string::npos) {
+ i = length.find(" plus");
+ if (i != string::npos)
+ is_gluelength = true;
+ } else
+ is_gluelength = true;
+ // if yes, transform "9xx minus 8yy plus 7zz"
+ // to "9xx-8yy+7zz"
+ if (is_gluelength) {
+ i = gluelength.find(" minus");
+ if (i != string::npos)
+ gluelength.replace(i, 7, "-");
+ i = gluelength.find(" plus");
+ if (i != string::npos)
+ gluelength.replace(i, 6, "+");
+ }
+
+ if (t.cs()[0] == 'h' && (known_unit || known_hspace || is_gluelength)) {
// Literal horizontal length or known variable
context.check_layout(os);
begin_inset(os, "space ");
os << unit;
os << "}";
if (known_unit && !known_hspace)
- os << "\n\\length "
- << translate_len(length);
+ os << "\n\\length " << translate_len(length);
+ if (is_gluelength)
+ os << "\n\\length " << gluelength;
end_inset(os);
- } else if (known_unit || known_vspace) {
+ } else if (known_unit || known_vspace || is_gluelength) {
// Literal vertical length or known variable
context.check_layout(os);
begin_inset(os, "VSpace ");
- if (known_unit)
- os << value;
- os << unit;
+ if (known_vspace)
+ os << unit;
+ if (known_unit && !known_vspace)
+ os << translate_len(length);
+ if (is_gluelength)
+ os << gluelength;
if (starred)
os << '*';
end_inset(os);
name += '*';
if (valid) {
if (value == 1.0)
- handle_ert(os, name + '{' + unit + '}', context);
+ output_ert_inset(os, name + '{' + unit + '}', context);
else if (value == -1.0)
- handle_ert(os, name + "{-" + unit + '}', context);
+ output_ert_inset(os, name + "{-" + unit + '}', context);
else
- handle_ert(os, name + '{' + valstring + unit + '}', context);
+ output_ert_inset(os, name + '{' + valstring + unit + '}', context);
} else
- handle_ert(os, name + '{' + length + '}', context);
+ output_ert_inset(os, name + '{' + length + '}', context);
}
}
// The single '=' is meant here.
- else if ((newinsetlayout = findInsetLayout(context.textclass, t.cs(), true))) {
+ else if ((newinsetlayout = findInsetLayout(context.textclass, starredname, true))) {
+ if (starred)
+ p.get_token();
p.skip_spaces();
context.check_layout(os);
- begin_inset(os, "Flex ");
- os << to_utf8(newinsetlayout->name()) << '\n'
- << "status collapsed\n";
- parse_text_in_inset(p, os, FLAG_ITEM, false, context, newinsetlayout);
+ docstring const name = newinsetlayout->name();
+ bool const caption = name.find(from_ascii("Caption:")) == 0;
+ if (caption) {
+ begin_inset(os, "Caption ");
+ os << to_utf8(name.substr(8)) << '\n';
+ } else {
+ begin_inset(os, "Flex ");
+ os << to_utf8(name) << '\n'
+ << "status collapsed\n";
+ }
+ if (newinsetlayout->isPassThru()) {
+ // set catcodes to verbatim early, just in case.
+ p.setCatcodes(VERBATIM_CATCODES);
+ string delim = p.get_token().asInput();
+ if (delim != "{")
+ cerr << "Warning: bad delimiter for command " << t.asInput() << endl;
+ //FIXME: handle error condition
+ string const arg = p.verbatimStuff("}").second;
+ Context newcontext(true, context.textclass);
+ if (newinsetlayout->forcePlainLayout())
+ newcontext.layout = &context.textclass.plainLayout();
+ output_ert(os, arg, newcontext);
+ } else
+ parse_text_in_inset(p, os, FLAG_ITEM, false, context, newinsetlayout);
+ if (caption)
+ p.skip_spaces();
end_inset(os);
}
vector<string> keys;
split_map(arg, opts, keys);
string name = normalize_filename(p.verbatim_item());
- string const path = getMasterFilePath();
+ string const path = getMasterFilePath(true);
// We want to preserve relative / absolute filenames,
// therefore path is only used for testing
if (!makeAbsPath(name, path).exists()) {
pdflatex = true;
}
}
- if (makeAbsPath(name, path).exists())
- fix_relative_filename(name);
- else
+ FileName const absname = makeAbsPath(name, path);
+ if (absname.exists())
+ {
+ fix_child_filename(name);
+ copy_file(absname, name);
+ } else
cerr << "Warning: Could not find file '"
<< name << "'." << endl;
// write output
else if (t.cs() == "loadgame") {
p.skip_spaces();
string name = normalize_filename(p.verbatim_item());
- string const path = getMasterFilePath();
+ string const path = getMasterFilePath(true);
// We want to preserve relative / absolute filenames,
// therefore path is only used for testing
if (!makeAbsPath(name, path).exists()) {
if (!lyxskak_name.empty())
name = lyxskak_name;
}
- if (makeAbsPath(name, path).exists())
- fix_relative_filename(name);
- else
+ FileName const absname = makeAbsPath(name, path);
+ if (absname.exists())
+ {
+ fix_child_filename(name);
+ copy_file(absname, name);
+ } else
cerr << "Warning: Could not find file '"
<< name << "'." << endl;
context.check_layout(os);
// try to see whether the string is in unicodesymbols
// Only use text mode commands, since we are in text mode here,
// and math commands may be invalid (bug 6797)
+ string name = t.asInput();
+ // handle the dingbats, cyrillic and greek
+ if (name == "\\ding" || name == "\\textcyr" ||
+ (name == "\\textgreek" && !preamble.usePolyglossia()))
+ name = name + '{' + p.getArg('{', '}') + '}';
+ // handle the ifsym characters
+ else if (name == "\\textifsymbol") {
+ string const optif = p.getFullOpt();
+ string const argif = p.getArg('{', '}');
+ name = name + optif + '{' + argif + '}';
+ }
+ // handle the \ascii characters
+ // the case of \ascii within braces, as LyX outputs it, is already
+ // handled for t.cat() == catBegin
+ else if (name == "\\ascii") {
+ // the code is "\ascii\xxx"
+ name = "{" + name + p.get_token().asInput() + "}";
+ skip_braces(p);
+ }
+ // handle some TIPA special characters
+ else if (preamble.isPackageUsed("tipa")) {
+ if (name == "\\textglobfall") {
+ name = "End";
+ skip_braces(p);
+ } else if (name == "\\s") {
+ // fromLaTeXCommand() does not yet
+ // recognize tipa short cuts
+ name = "\\textsyllabic";
+ } else if (name == "\\=" &&
+ p.next_token().asInput() == "*") {
+ // fromLaTeXCommand() does not yet
+ // recognize tipa short cuts
+ p.get_token();
+ name = "\\textsubbar";
+ } else if (name == "\\textdoublevertline") {
+ // FIXME: This is not correct,
+ // \textvertline is higher than \textbardbl
+ name = "\\textbardbl";
+ skip_braces(p);
+ } else if (name == "\\!" ) {
+ if (p.next_token().asInput() == "b") {
+ p.get_token(); // eat 'b'
+ name = "\\texthtb";
+ skip_braces(p);
+ } else if (p.next_token().asInput() == "d") {
+ p.get_token();
+ name = "\\texthtd";
+ skip_braces(p);
+ } else if (p.next_token().asInput() == "g") {
+ p.get_token();
+ name = "\\texthtg";
+ skip_braces(p);
+ } else if (p.next_token().asInput() == "G") {
+ p.get_token();
+ name = "\\texthtscg";
+ skip_braces(p);
+ } else if (p.next_token().asInput() == "j") {
+ p.get_token();
+ name = "\\texthtbardotlessj";
+ skip_braces(p);
+ } else if (p.next_token().asInput() == "o") {
+ p.get_token();
+ name = "\\textbullseye";
+ skip_braces(p);
+ }
+ } else if (name == "\\*" ) {
+ if (p.next_token().asInput() == "k") {
+ p.get_token();
+ name = "\\textturnk";
+ skip_braces(p);
+ } else if (p.next_token().asInput() == "r") {
+ p.get_token(); // eat 'r'
+ name = "\\textturnr";
+ skip_braces(p);
+ } else if (p.next_token().asInput() == "t") {
+ p.get_token();
+ name = "\\textturnt";
+ skip_braces(p);
+ } else if (p.next_token().asInput() == "w") {
+ p.get_token();
+ name = "\\textturnw";
+ skip_braces(p);
+ }
+ }
+ }
+ if ((name.size() == 2 &&
+ contains("\"'.=^`bcdHkrtuv~", name[1]) &&
+ p.next_token().asInput() != "*") ||
+ is_known(name.substr(1), known_tipa_marks)) {
+ // name is a command that corresponds to a
+ // combining character in unicodesymbols.
+ // Append the argument, fromLaTeXCommand()
+ // will either convert it to a single
+ // character or a combining sequence.
+ name += '{' + p.verbatim_item() + '}';
+ }
+ // now get the character from unicodesymbols
bool termination;
docstring rem;
set<string> req;
- docstring s = encodings.fromLaTeXCommand(from_utf8(t.asInput()),
- Encodings::TEXT_CMD, termination, rem, &req);
+ docstring s = normalize_c(encodings.fromLaTeXCommand(from_utf8(name),
+ Encodings::TEXT_CMD, termination, rem, &req));
if (!s.empty()) {
- if (!rem.empty())
- cerr << "When parsing " << t.cs()
- << ", result is " << to_utf8(s)
- << "+" << to_utf8(rem) << endl;
context.check_layout(os);
os << to_utf8(s);
+ if (!rem.empty())
+ output_ert_inset(os, to_utf8(rem), context);
if (termination)
skip_spaces_braces(p);
for (set<string>::const_iterator it = req.begin(); it != req.end(); ++it)
/*
string s = t.asInput();
string z = p.verbatim_item();
- while (p.good() && z != " " && z.size()) {
+ while (p.good() && z != " " && !z.empty()) {
//cerr << "read: " << z << endl;
s += z;
z = p.verbatim_item();
}
cerr << "found ERT: " << s << endl;
- handle_ert(os, s + ' ', context);
+ output_ert_inset(os, s + ' ', context);
*/
else {
- string name = t.asInput();
- if (p.next_token().asInput() == "*") {
+ if (t.asInput() == name &&
+ p.next_token().asInput() == "*") {
// Starred commands like \vspace*{}
p.get_token(); // Eat '*'
name += '*';
}
if (!parse_command(name, p, os, outer, context))
- handle_ert(os, name, context);
+ output_ert_inset(os, name, context);
}
}
}
}
+
+/**
+ * Guess the dominant language of the remaining input.
+ * Characters inside supported CJK environments are counted for the
+ * corresponding CJK language; all other non-comment, non-command text
+ * is counted for \p lang (the language given by the caller, e.g. the
+ * babel main language). The language with the most characters wins.
+ * NOTE(review): this reads tokens from \p p until the input is
+ * exhausted — presumably callers save/restore the parser position
+ * around the call; confirm at the call sites.
+ */
+string guessLanguage(Parser & p, string const & lang)
+{
+ typedef std::map<std::string, size_t> LangMap;
+ // map from language names to number of characters
+ LangMap used;
+ used[lang] = 0;
+ for (char const * const * i = supported_CJK_languages; *i; i++)
+ used[string(*i)] = 0;
+
+ while (p.good()) {
+ Token const t = p.get_token();
+ // comments are not counted for any language
+ if (t.cat() == catComment)
+ continue;
+ // commands are not counted as well, but we need to detect
+ // \begin{CJK} and switch encoding if needed
+ if (t.cat() == catEscape) {
+ if (t.cs() == "inputencoding") {
+ string const enc = subst(p.verbatim_item(), "\n", " ");
+ p.setEncoding(enc, Encoding::inputenc);
+ continue;
+ }
+ if (t.cs() != "begin")
+ continue;
+ } else {
+ // Non-CJK content is counted for lang.
+ // We do not care about the real language here:
+ // If we have more non-CJK contents than CJK contents,
+ // we simply use the language that was specified as
+ // babel main language.
+ used[lang] += t.asInput().length();
+ continue;
+ }
+ // Now we are starting an environment
+ p.pushPosition();
+ string const name = p.getArg('{', '}');
+ if (name != "CJK") {
+ p.popPosition();
+ continue;
+ }
+ // It is a CJK environment
+ p.popPosition();
+ /* name = */ p.getArg('{', '}');
+ string const encoding = p.getArg('{', '}');
+ /* mapping = */ p.getArg('{', '}');
+ string const encoding_old = p.getEncoding();
+ char const * const * const where =
+ is_known(encoding, supported_CJK_encodings);
+ if (where)
+ p.setEncoding(encoding, Encoding::CJK);
+ else
+ p.setEncoding("UTF-8");
+ // read the whole environment body with the chosen encoding
+ // active, then restore the previous encoding
+ string const text = p.ertEnvironment("CJK");
+ p.setEncoding(encoding_old);
+ p.skip_spaces();
+ if (!where) {
+ // ignore contents in unknown CJK encoding
+ continue;
+ }
+ // the language of the text
+ string const cjk =
+ supported_CJK_languages[where - supported_CJK_encodings];
+ used[cjk] += text.length();
+ }
+ // pick the language with the highest character count; on a tie the
+ // first entry in map (alphabetical) order wins, since only a strictly
+ // greater count replaces the current choice
+ LangMap::const_iterator use = used.begin();
+ for (LangMap::const_iterator it = used.begin(); it != used.end(); ++it) {
+ if (it->second > use->second)
+ use = it;
+ }
+ return use->first;
+}
+
// }])