/*!
* natbib commands.
- * The starred forms are also known.
+ * The starred forms are also known except for "citefullauthor",
+ * "citeyear" and "citeyearpar".
*/
char const * const known_natbib_commands[] = { "cite", "citet", "citep",
"citealt", "citealp", "citeauthor", "citeyear", "citeyearpar",
// "fullcite",
// "footcite", "footcitet", "footcitep", "footcitealt", "footcitealp",
// "footciteauthor", "footciteyear", "footciteyearpar",
-"citefield", "citetitle", "cite*", 0 };
+"citefield", "citetitle", 0 };
+// NOTE(review): "cite*" is no longer listed explicitly; presumably starred
+// citation forms are now detected by peeking at the '*' token in the
+// citation-handling code -- confirm against that branch.
/// LaTeX names for quotes
char const * const known_quotes[] = { "dq", "guillemotleft", "flqq", "og",
/// spaces known by InsetSpace
+// NOTE: must stay in one-to-one correspondence with known_coded_spaces
+// below; the output code indexes both tables in parallel
+// (known_coded_spaces[where - known_spaces]).
char const * const known_spaces[] = { " ", "space", ",", "thinspace", "quad",
-"qquad", "enspace", "enskip", "negthinspace", 0};
+"qquad", "enspace", "enskip", "negthinspace", "hfill", "dotfill", "hrulefill",
+"leftarrowfill", "rightarrowfill", "upbracefill", "downbracefill", 0};
/// the same as known_spaces with .lyx names
+// (kept in exactly the same order as known_spaces; the two arrays are
+// indexed in parallel when translating a space command)
char const * const known_coded_spaces[] = { "space{}", "space{}",
"thinspace{}", "thinspace{}", "quad{}", "qquad{}", "enspace{}", "enskip{}",
-"negthinspace{}", 0};
+"negthinspace{}", "hfill{}", "dotfill{}", "hrulefill{}", "leftarrowfill{}",
+"rightarrowfill{}", "upbracefill{}", "downbracefill{}", 0};
-/// splits "x=z, y=b" into a map
-map<string, string> split_map(string const & s)
+/// splits "x=z, y=b" into a map and an ordered keyword vector
+// "res" maps keyword -> value; "keys" records the keywords in their
+// original textual order (one slot per item, so duplicates are possible).
+void split_map(string const & s, map<string, string> & res, vector<string> & keys)
{
- map<string, string> res;
vector<string> v;
split(s, v);
+ res.clear();
+ keys.resize(v.size());
for (size_t i = 0; i < v.size(); ++i) {
+ // keyword is everything before '=', value everything after it.
+ // NOTE(review): if an item contains no '=', pos == npos and
+ // pos + 1 wraps around to 0, so index and value both become the
+ // whole item -- presumably every option has "key=value" form.
size_t const pos = v[i].find('=');
- string const index = v[i].substr(0, pos);
- string const value = v[i].substr(pos + 1, string::npos);
- res[trim(index)] = trim(value);
+ string const index = trim(v[i].substr(0, pos));
+ string const value = trim(v[i].substr(pos + 1, string::npos));
+ res[index] = value;
+ keys[i] = index;
}
- return res;
}
os << "\n\\begin_inset " << name;
}
-/*// use this void when format 288 is supported
+
+/// Write the opening of a LyX CommandInset called "name" whose LaTeX
+/// command is "latexname". The caller writes the inset parameters and
+/// closes the inset with end_inset().
void begin_command_inset(ostream & os, string const & name,
- string const & latexname)
+ string const & latexname)
{
- os << "\n\\begin_inset CommandInset " << name;
- os << "\nLatexCommand " << latexname << "\n";
-}*/
+ begin_inset(os, "CommandInset ");
+ os << name << "\nLatexCommand " << latexname << '\n';
+}
void end_inset(ostream & os)
}
-void skip_braces(Parser & p)
+/// Skip one *empty* brace pair "{}" if it comes next.
+/// Returns true iff an empty pair was actually consumed; a non-empty
+/// brace group is left untouched (the opening brace is put back).
+bool skip_braces(Parser & p)
{
if (p.next_token().cat() != catBegin)
- return;
+ return false;
p.get_token();
if (p.next_token().cat() == catEnd) {
p.get_token();
- return;
+ return true;
}
+// group is not empty: undo the get_token() above and leave it alone
p.putback();
+ return false;
}
void eat_whitespace(Parser &, ostream &, Context &, bool);
+/*!
+ * Skips whitespace and braces.
+ * This should be called after a command has been parsed that is not put into
+ * ERT, and where LyX adds "{}" if needed.
+ */
+void skip_spaces_braces(Parser & p)
+{
+ /* The following four examples produce the same typeset output and
+ should be handled by this function:
+ - abc \j{} xyz
+ - abc \j {} xyz
+ - abc \j
+ {} xyz
+ - abc \j %comment
+ {} xyz
+ */
+ // Unfortunately we need to skip comments, too.
+ // We can't use eat_whitespace since writing them after the {}
+ // results in different output in some cases.
+ bool const skipped_spaces = p.skip_spaces(true);
+ // skip_braces() only consumes an *empty* "{}" pair
+ bool const skipped_braces = skip_braces(p);
+ if (skipped_spaces && !skipped_braces)
+ // put back the space (it is better handled by check_space)
+ p.unskip_spaces(true);
+}
+
+
void output_command_layout(ostream & os, Parser & p, bool outer,
Context & parent_context,
Layout const * newlayout)
ert += '{' + p.verbatim_item() + '}';
break;
case optional:
- ert += p.getOpt();
+ // true because we must not eat whitespace
+ ert += p.getOpt(true);
break;
}
}
p.skip_spaces();
// We add a protected space if something real follows
if (p.good() && p.next_token().cat() != catComment) {
- os << "\\InsetSpace ~\n";
+ begin_inset(os, "Space ~\n");
+ end_inset(os);
}
}
#endif
p.skip_spaces();
}
- else if (name == "framed") {
- eat_whitespace(p, os, parent_context, false);
- parent_context.check_layout(os);
- begin_inset(os, "Note Framed\n");
- os << "status open\n";
- parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
- end_inset(os);
- p.skip_spaces();
- }
-
- else if (name == "shaded") {
+ else if (name == "framed" || name == "shaded") {
eat_whitespace(p, os, parent_context, false);
parent_context.check_layout(os);
- begin_inset(os, "Note Shaded\n");
- os << "status open\n";
+ if (name == "framed")
+ begin_inset(os, "Box Framed\n");
+ else
+ begin_inset(os, "Box Shaded\n");
+ os << "position \"t\"\n"
+ "hor_pos \"c\"\n"
+ "has_inner_box 0\n"
+ "inner_pos \"t\"\n"
+ "use_parbox 0\n"
+ "width \"100col%\"\n"
+ "special \"none\"\n"
+ "height \"1in\"\n"
+ "height_special \"totalheight\"\n"
+ "status open\n";
parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
end_inset(os);
p.skip_spaces();
// the two environments as one otherwise (bug 5716)
docstring const sep = from_ascii("--Separator--");
TeX2LyXDocClass const & textclass(parent_context.textclass);
- if (LYX_FORMAT >= 273 && textclass.hasLayout(sep)) {
+ if (textclass.hasLayout(sep)) {
Context newcontext(parent_context);
newcontext.layout = &(textclass[sep]);
newcontext.check_layout(os);
newcontext.check_end_layout(os);
}
+
+/// detects \\def, \\long\\def and \\global\\long\\def with ws and comments
+bool is_macro(Parser & p)
+{
+ Token first = p.curr_token();
+ if (first.cat() != catEscape || !p.good())
+ return false;
+ if (first.cs() == "def")
+ return true;
+ if (first.cs() != "global" && first.cs() != "long")
+ return false;
+ Token second = p.get_token();
+ int pos = 1;
+ while (p.good() && !p.isParagraph() && (second.cat() == catSpace ||
+ second.cat() == catNewline || second.cat() == catComment)) {
+ second = p.get_token();
+ pos++;
+ }
+ bool secondvalid = second.cat() == catEscape;
+ Token third;
+ bool thirdvalid = false;
+ if (p.good() && first.cs() == "global" && secondvalid &&
+ second.cs() == "long") {
+ third = p.get_token();
+ pos++;
+ while (p.good() && !p.isParagraph() &&
+ (third.cat() == catSpace ||
+ third.cat() == catNewline ||
+ third.cat() == catComment)) {
+ third = p.get_token();
+ pos++;
+ }
+ thirdvalid = third.cat() == catEscape;
+ }
+ for (int i = 0; i < pos; ++i)
+ p.putback();
+ if (!secondvalid)
+ return false;
+ if (!thirdvalid)
+ return (first.cs() == "global" || first.cs() == "long") &&
+ second.cs() == "def";
+ return first.cs() == "global" && second.cs() == "long" &&
+ third.cs() == "def";
+}
+
+
+/// Parse a macro definition (assumes that is_macro() returned true)
+/// "command" accumulates the full prefix (\def, \long\def or
+/// \global\long\def) so the definition can be reproduced verbatim as ERT
+/// if it turns out not to be representable as a FormulaMacro.
+void parse_macro(Parser & p, ostream & os, Context & context)
+{
+ context.check_layout(os);
+ Token first = p.curr_token();
+ Token second;
+ Token third;
+ string command = first.asInput();
+ // collect the optional \global / \long prefix tokens up to \def
+ if (first.cs() != "def") {
+ p.get_token();
+ eat_whitespace(p, os, context, false);
+ second = p.curr_token();
+ command += second.asInput();
+ if (second.cs() != "def") {
+ p.get_token();
+ eat_whitespace(p, os, context, false);
+ third = p.curr_token();
+ command += third.asInput();
+ }
+ }
+ eat_whitespace(p, os, context, false);
+ string const name = p.get_token().cs();
+ eat_whitespace(p, os, context, false);
+
+ // parameter text
+ // "simple" stays true only while the parameter text is exactly
+ // #1#2... in ascending order with nothing in between.
+ bool simple = true;
+ string paramtext;
+ int arity = 0;
+ while (p.next_token().cat() != catBegin) {
+ if (p.next_token().cat() == catParameter) {
+ // # found
+ p.get_token();
+ paramtext += "#";
+
+ // followed by number?
+ if (p.next_token().cat() == catOther) {
+ char c = p.getChar();
+ paramtext += c;
+ // number = current arity + 1?
+ if (c == arity + '0' + 1)
+ ++arity;
+ else
+ simple = false;
+ } else
+ paramtext += p.get_token().cs();
+ } else {
+ paramtext += p.get_token().cs();
+ simple = false;
+ }
+ }
+
+ // only output simple (i.e. compatible) macro as FormulaMacros
+ // Note that the simple branch always writes plain "\def": any
+ // \global/\long prefix is dropped there; the non-simple branch keeps
+ // the full prefix by prepending "command" to the ERT.
+ string ert = '\\' + name + ' ' + paramtext + '{' + p.verbatim_item() + '}';
+ if (simple) {
+ context.check_layout(os);
+ begin_inset(os, "FormulaMacro");
+ os << "\n\\def" << ert;
+ end_inset(os);
+ } else
+ handle_ert(os, command + ert, context);
+}
+
} // anonymous namespace
if (t.character() == '~') {
if (context.layout->free_spacing)
os << ' ';
- else
- os << "\\InsetSpace ~\n";
+ else {
+ begin_inset(os, "Space ~\n");
+ end_inset(os);
+ }
} else
os << t.cs();
}
else if (t.cs() == "bibitem") {
context.set_item();
context.check_layout(os);
- begin_inset(os, "LatexCommand ");
- os << t.cs() << "\n";
+ begin_command_inset(os, "bibitem", "bibitem");
os << "label \"" << p.getOptContent() << "\"\n";
os << "key \"" << p.verbatim_item() << "\"\n";
end_inset(os);
}
- else if (t.cs() == "def") {
- context.check_layout(os);
- eat_whitespace(p, os, context, false);
- string name = p.get_token().cs();
- eat_whitespace(p, os, context, false);
-
- // parameter text
- bool simple = true;
- string paramtext;
- int arity = 0;
- while (p.next_token().cat() != catBegin) {
- if (p.next_token().cat() == catParameter) {
- // # found
- p.get_token();
- paramtext += "#";
-
- // followed by number?
- if (p.next_token().cat() == catOther) {
- char c = p.getChar();
- paramtext += c;
- // number = current arity + 1?
- if (c == arity + '0' + 1)
- ++arity;
- else
- simple = false;
- } else
- paramtext += p.get_token().cs();
- } else {
- paramtext += p.get_token().cs();
- simple = false;
- }
- }
-
- // only output simple (i.e. compatible) macro as FormulaMacros
- string ert = "\\def\\" + name + ' ' + paramtext + '{' + p.verbatim_item() + '}';
- if (simple) {
- context.check_layout(os);
- begin_inset(os, "FormulaMacro");
- os << "\n" << ert;
- end_inset(os);
- } else
- handle_ert(os, ert, context);
- }
+ else if (is_macro(p))
+ parse_macro(p, os, context);
else if (t.cs() == "noindent") {
p.skip_spaces();
bool const clip = p.next_token().asInput() == "*";
if (clip)
p.get_token();
- map<string, string> opts = split_map(p.getArg('[', ']'));
+ string const arg = p.getArg('[', ']');
+ map<string, string> opts;
+ vector<string> keys;
+ split_map(arg, opts, keys);
if (clip)
opts["clip"] = string();
string name = normalize_filename(p.verbatim_item());
val = val*100;
os << "\tscale " << val << '\n';
}
- if (opts.find("angle") != opts.end())
+ if (opts.find("angle") != opts.end()) {
os << "\trotateAngle "
<< opts["angle"] << '\n';
+ vector<string>::const_iterator a =
+ find(keys.begin(), keys.end(), "angle");
+ vector<string>::const_iterator s =
+ find(keys.begin(), keys.end(), "width");
+ if (s == keys.end())
+ s = find(keys.begin(), keys.end(), "height");
+ if (s == keys.end())
+ s = find(keys.begin(), keys.end(), "scale");
+ if (s != keys.end() && distance(s, a) > 0)
+ os << "\tscaleBeforeRotation\n";
+ }
if (opts.find("origin") != opts.end()) {
ostringstream ss;
string const opt = opts["origin"];
context);
}
- else if (t.cs() == "hfill") {
- context.check_layout(os);
- os << "\n\\hfill\n";
- skip_braces(p);
- p.skip_spaces();
- }
-
else if (t.cs() == "makeindex" || t.cs() == "maketitle") {
// FIXME: Somehow prevent title layouts if
// "maketitle" was not found
- p.skip_spaces();
- skip_braces(p); // swallow this
+ // swallow this
+ skip_spaces_braces(p);
}
else if (t.cs() == "tableofcontents") {
- p.skip_spaces();
context.check_layout(os);
- begin_inset(os, "LatexCommand ");
- os << t.cs() << "\n";
+ begin_command_inset(os, "toc", "tableofcontents");
end_inset(os);
- skip_braces(p); // swallow this
+ skip_spaces_braces(p);
}
else if (t.cs() == "listoffigures") {
- p.skip_spaces();
context.check_layout(os);
begin_inset(os, "FloatList figure\n");
end_inset(os);
- skip_braces(p); // swallow this
+ skip_spaces_braces(p);
}
else if (t.cs() == "listoftables") {
- p.skip_spaces();
context.check_layout(os);
begin_inset(os, "FloatList table\n");
end_inset(os);
- skip_braces(p); // swallow this
+ skip_spaces_braces(p);
}
else if (t.cs() == "listof") {
else if (is_known(t.cs(), known_ref_commands)) {
context.check_layout(os);
- begin_inset(os, "LatexCommand ");
- os << t.cs() << "\n";
+ begin_command_inset(os, "ref", t.cs());
// LyX cannot handle newlines in a latex command
// FIXME: Move the substitution into parser::getOpt()?
os << subst(p.getOpt(), "\n", " ");
// LyX cannot handle newlines in the parameter
before = subst(before, "\n", " ");
}
- begin_inset(os, "LatexCommand ");
- os << t.cs() << "\n";
+ begin_command_inset(os, "citation", command);
os << "after " << '"' << after << '"' << "\n";
os << "before " << '"' << before << '"' << "\n";
os << "key " << '"' << p.verbatim_item() << '"' << "\n";
}
else if (use_jurabib &&
- is_known(t.cs(), known_jurabib_commands)) {
+ is_known(t.cs(), known_jurabib_commands) &&
+ (t.cs() == "cite" || p.next_token().asInput() != "*")) {
context.check_layout(os);
- string const command = t.cs();
+ string command = t.cs();
+ if (p.next_token().asInput() == "*") {
+ command += '*';
+ p.get_token();
+ }
char argumentOrder = '\0';
vector<string> const & options = used_packages["jurabib"];
if (find(options.begin(), options.end(),
before.erase(0, 1);
before.erase(before.length() - 1, 1);
}
- begin_inset(os, "LatexCommand ");
- os << t.cs() << "\n";
+ begin_command_inset(os, "citation", command);
os << "after " << '"' << after << '"' << "\n";
os << "before " << '"' << before << '"' << "\n";
os << "key " << '"' << citation << '"' << "\n";
context.check_layout(os);
// LyX cannot handle newlines in a latex command
string after = subst(p.getOptContent(), "\n", " ");
- begin_inset(os, "LatexCommand ");
- os << t.cs() << "\n";
+ begin_command_inset(os, "citation", "cite");
os << "after " << '"' << after << '"' << "\n";
os << "key " << '"' << subst(p.verbatim_item(), "\n", " ") << '"' << "\n";
end_inset(os);
else if (t.cs() == "index") {
context.check_layout(os);
- begin_inset(os, "LatexCommand ");
- os << t.cs() << "\n";
- // LyX cannot handle newlines in a latex command
- os << "name " << '"' << subst(p.verbatim_item(), "\n", " ") << '"' << "\n";
+ begin_inset(os, "Index\n");
+ os << "status collapsed\n";
+ parse_text_in_inset(p, os, FLAG_ITEM, false, context);
end_inset(os);
}
else if (t.cs() == "nomenclature") {
context.check_layout(os);
- begin_inset(os, "LatexCommand ");
- os << t.cs() << "\n";
+ begin_command_inset(os, "nomenclature", "nomenclature");
// LyX cannot handle newlines in a latex command
string prefix = subst(p.getOptContent(), "\n", " ");
if (!prefix.empty())
else if (t.cs() == "label") {
context.check_layout(os);
- begin_inset(os, "LatexCommand ");
- os << t.cs() << "\n";
+ begin_command_inset(os, "label", "label");
// LyX cannot handle newlines in a latex command
os << "name " << '"' << subst(p.verbatim_item(), "\n", " ") << '"' << "\n";
end_inset(os);
else if (t.cs() == "printindex") {
context.check_layout(os);
- begin_inset(os, "LatexCommand ");
- os << t.cs() << "\n";
+ begin_command_inset(os, "index_print", "printindex");
end_inset(os);
- skip_braces(p);
+ skip_spaces_braces(p);
}
else if (t.cs() == "printnomenclature") {
context.check_layout(os);
- begin_inset(os, "LatexCommand ");
- os << t.cs() << "\n";
+ begin_command_inset(os, "nomencl_print", "printnomenclature");
end_inset(os);
- skip_braces(p);
+ skip_spaces_braces(p);
}
else if (t.cs() == "url") {
context.check_layout(os);
- begin_inset(os, "LatexCommand ");
- os << t.cs() << "\n";
- // LyX cannot handle newlines in a latex command
- os << "target " << '"' << subst(p.verbatim_item(), "\n", " ") << '"' << "\n";
+ begin_inset(os, "Flex URL\n");
+ os << "status collapsed\n";
+ parse_text_in_inset(p, os, FLAG_ITEM, false, context);
end_inset(os);
}
|| t.cs() == "LaTeX") {
context.check_layout(os);
os << t.cs();
- skip_braces(p); // eat {}
+ skip_spaces_braces(p);
}
else if (t.cs() == "LaTeXe") {
context.check_layout(os);
os << "LaTeX2e";
- skip_braces(p); // eat {}
+ skip_spaces_braces(p);
}
else if (t.cs() == "ldots") {
context.check_layout(os);
- skip_braces(p);
os << "\\SpecialChar \\ldots{}\n";
+ skip_spaces_braces(p);
}
else if (t.cs() == "lyxarrow") {
context.check_layout(os);
os << "\\SpecialChar \\menuseparator\n";
- skip_braces(p);
+ skip_spaces_braces(p);
}
else if (t.cs() == "textcompwordmark") {
context.check_layout(os);
os << "\\SpecialChar \\textcompwordmark{}\n";
- skip_braces(p);
+ skip_spaces_braces(p);
+ }
+
+ else if (t.cs() == "slash") {
+ context.check_layout(os);
+ os << "\\SpecialChar \\slash{}\n";
+ skip_spaces_braces(p);
+ }
+
+ else if (t.cs() == "nobreakdash") {
+ context.check_layout(os);
+ os << "\\SpecialChar \\nobreakdash\n";
}
else if (t.cs() == "textquotedbl") {
else if (t.cs() == "textasciitilde") {
context.check_layout(os);
os << '~';
- skip_braces(p);
+ skip_spaces_braces(p);
}
else if (t.cs() == "textasciicircum") {
context.check_layout(os);
os << '^';
- skip_braces(p);
+ skip_spaces_braces(p);
}
else if (t.cs() == "textbackslash") {
context.check_layout(os);
os << "\n\\backslash\n";
- skip_braces(p);
+ skip_spaces_braces(p);
}
else if (t.cs() == "_" || t.cs() == "&" || t.cs() == "#"
}
}
- else if (t.cs() == "newline") {
+ else if (t.cs() == "newline"
+ || t.cs() == "linebreak") {
context.check_layout(os);
os << "\n\\" << t.cs() << "\n";
- skip_braces(p); // eat {}
+ skip_spaces_braces(p);
}
else if (t.cs() == "input" || t.cs() == "include"
|| t.cs() == "verbatiminput") {
- string name = '\\' + t.cs();
+ string name = t.cs();
if (t.cs() == "verbatiminput"
&& p.next_token().asInput() == "*")
name += p.get_token().asInput();
os << "\ttemplate XFig\n"
<< "\tfilename " << outname << '\n';
} else {
- begin_inset(os, "Include ");
- os << name << '{' << outname
- << "}\npreview false\n";
+ begin_command_inset(os, "include", name);
+ os << "preview false\n"
+ "filename \"" << outname << "\"\n";
}
end_inset(os);
}
else if (t.cs() == "bibliography") {
context.check_layout(os);
- begin_inset(os, "LatexCommand ");
- os << "bibtex" << "\n";
+ begin_command_inset(os, "bibtex", "bibtex");
os << "bibfiles " << '"' << p.verbatim_item() << '"' << "\n";
// Do we have a bibliographystyle set?
if (!bibliographystyle.empty())
begin_inset(os, "VSpace ");
os << t.cs();
end_inset(os);
- skip_braces(p);
+ skip_spaces_braces(p);
}
else if (is_known(t.cs(), known_spaces)) {
char const * const * where = is_known(t.cs(), known_spaces);
context.check_layout(os);
- os << "\\InsetSpace ";
+ begin_inset(os, "Space ");
os << '\\' << known_coded_spaces[where - known_spaces]
<< '\n';
+ end_inset(os);
// LaTeX swallows whitespace after all spaces except
// "\\,". We have to do that here, too, because LyX
// adds "{}" which would make the spaces significant.
}
else if (t.cs() == "newpage" ||
+ t.cs() == "pagebreak" ||
t.cs() == "clearpage" ||
t.cs() == "cleardoublepage") {
context.check_layout(os);
os << "\n\\" << t.cs() << "\n";
- skip_braces(p); // eat {}
+ skip_spaces_braces(p);
}
else if (t.cs() == "newcommand" ||
t.cs() == "providecommand" ||
t.cs() == "renewcommand") {
- // these could be handled by parse_command(), but
- // we need to call add_known_command() here.
+ // providecommand could be handled by parse_command(),
+ // but we need to call add_known_command() here.
string name = t.asInput();
if (p.next_token().asInput() == "*") {
// Starred form. Eat '*'
opt1 + opt2 +
'{' + p.verbatim_item() + '}';
- context.check_layout(os);
- begin_inset(os, "FormulaMacro");
- os << "\n" << ert;
- end_inset(os);
+ if (t.cs() == "providecommand")
+ handle_ert(os, ert, context);
+ else {
+ context.check_layout(os);
+ begin_inset(os, "FormulaMacro");
+ os << "\n" << ert;
+ end_inset(os);
+ }
}
- else if (t.cs() == "vspace") {
+ else if (t.cs() == "hspace" || t.cs() == "vspace") {
bool starred = false;
if (p.next_token().asInput() == "*") {
p.get_token();
starred = true;
}
+ string name = t.asInput();
string const length = p.verbatim_item();
string unit;
string valstring;
bool valid = splitLatexLength(length, valstring, unit);
+ bool known_hspace = false;
bool known_vspace = false;
bool known_unit = false;
double value;
istringstream iss(valstring);
iss >> value;
if (value == 1.0) {
- if (unit == "\\smallskipamount") {
- unit = "smallskip";
- known_vspace = true;
- } else if (unit == "\\medskipamount") {
- unit = "medskip";
- known_vspace = true;
- } else if (unit == "\\bigskipamount") {
- unit = "bigskip";
- known_vspace = true;
- } else if (unit == "\\fill") {
- unit = "vfill";
- known_vspace = true;
+ if (t.cs()[0] == 'h') {
+ if (unit == "\\fill") {
+ if (!starred) {
+ unit = "";
+ name = "hfill";
+ }
+ known_hspace = true;
+ }
+ } else {
+ if (unit == "\\smallskipamount") {
+ unit = "smallskip";
+ known_vspace = true;
+ } else if (unit == "\\medskipamount") {
+ unit = "medskip";
+ known_vspace = true;
+ } else if (unit == "\\bigskipamount") {
+ unit = "bigskip";
+ known_vspace = true;
+ } else if (unit == "\\fill") {
+ unit = "vfill";
+ known_vspace = true;
+ }
}
}
- if (!known_vspace) {
+ if (!known_hspace && !known_vspace) {
switch (unitFromString(unit)) {
case Length::SP:
case Length::PT:
}
}
- if (known_unit || known_vspace) {
- // Literal length or known variable
+ if (t.cs()[0] == 'h' && (known_unit || known_hspace)) {
+ // Literal horizontal length or known variable
+ context.check_layout(os);
+ begin_inset(os, "Space \\");
+ os << name;
+ if (starred)
+ os << '*';
+ os << '{';
+ if (known_hspace)
+ os << unit;
+ os << "}\n";
+ if (known_unit && !known_hspace)
+ os << "\\length "
+ << translate_len(length) << '\n';
+ end_inset(os);
+ } else if (known_unit || known_vspace) {
+ // Literal vertical length or known variable
context.check_layout(os);
begin_inset(os, "VSpace ");
if (known_unit)
os << '*';
end_inset(os);
} else {
- // LyX can't handle other length variables in Inset VSpace
- string name = t.asInput();
+ // LyX can't handle other length variables in Inset V?Space
if (starred)
name += '*';
if (valid) {
else {
// try to see whether the string is in unicodesymbols
+ // Only use text mode commands, since we are in text mode here,
+ // and math commands may be invalid (bug 6797)
docstring rem;
- docstring s = encodings.fromLaTeXCommand(from_utf8(t.asInput()), rem);
+ docstring s = encodings.fromLaTeXCommand(from_utf8(t.asInput()),
+ rem, Encodings::TEXT_CMD);
if (!s.empty()) {
if (!rem.empty())
cerr << "When parsing " << t.cs()
<< "+" << to_utf8(rem) << endl;
context.check_layout(os);
os << to_utf8(s);
- p.skip_spaces();
- skip_braces(p); // eat {}
+ skip_spaces_braces(p);
}
//cerr << "#: " << t << " mode: " << mode << endl;
// heuristic: read up to next non-nested space