* This file is part of LyX, the document processor.
* Licence details can be found in the file COPYING.
*
- * \author André Pönitz
+ * \author André Pönitz
* \author Jean-Marc Lasgouttes
- * \author Uwe Stöhr
+ * \author Uwe Stöhr
*
* Full author contact details are available in file CREDITS.
*/
#include "tex2lyx.h"
#include "Context.h"
+#include "Encoding.h"
#include "FloatList.h"
#include "Layout.h"
#include "Length.h"
-#include "support/assert.h"
+#include "support/lassert.h"
#include "support/convert.h"
#include "support/FileName.h"
#include "support/filetools.h"
void parse_text_in_inset(Parser & p, ostream & os, unsigned flags, bool outer,
- Context const & context)
+ Context const & context, InsetLayout const * layout)
{
+ bool const forcePlainLayout =
+ layout ? layout->forcePlainLayout() : false;
Context newcontext(true, context.textclass);
- newcontext.font = context.font;
+ if (forcePlainLayout)
+ newcontext.layout = &context.textclass.plainLayout();
+ else
+ newcontext.font = context.font;
parse_text(p, os, flags, outer, newcontext);
newcontext.check_end_layout(os);
}
namespace {
+void parse_text_in_inset(Parser & p, ostream & os, unsigned flags, bool outer,
+ Context const & context, string const & name)
+{
+ InsetLayout const * layout = 0;
+ DocumentClass::InsetLayouts::const_iterator it =
+ context.textclass.insetLayouts().find(from_ascii(name));
+ if (it != context.textclass.insetLayouts().end())
+ layout = &(it->second);
+ parse_text_in_inset(p, os, flags, outer, context, layout);
+}
+
/// parses a paragraph snippet, useful for example for \\emph{...}
void parse_text_snippet(Parser & p, ostream & os, unsigned flags, bool outer,
Context & context)
{
Context newcontext(context);
- // Don't inherit the extra stuff
- newcontext.extra_stuff.clear();
+ // Don't inherit the paragraph-level extra stuff
+ newcontext.par_extra_stuff.clear();
parse_text(p, os, flags, outer, newcontext);
// Make sure that we don't create invalid .lyx files
context.need_layout = newcontext.need_layout;
newcontext.need_end_layout = false;
newcontext.new_layout_allowed = false;
// Avoid warning by Context::~Context()
- newcontext.extra_stuff.clear();
+ newcontext.par_extra_stuff.clear();
ostringstream os;
parse_text_snippet(p, os, flags, outer, newcontext);
return os.str();
}
-char const * const known_latex_commands[] = { "ref", "cite", "nocite", "label",
- "index", "printindex", "pageref", "url", "vref", "vpageref", "prettyref",
- "eqref", 0 };
+char const * const known_ref_commands[] = { "ref", "pageref", "vref",
+ "vpageref", "prettyref", "eqref", 0 };
/*!
* natbib commands.
- * We can't put these into known_latex_commands because the argument order
- * is reversed in lyx if there are 2 arguments.
- * The starred forms are also known.
+ * The starred forms are also known except for "citefullauthor",
+ * "citeyear" and "citeyearpar".
*/
char const * const known_natbib_commands[] = { "cite", "citet", "citep",
"citealt", "citealp", "citeauthor", "citeyear", "citeyearpar",
/*!
* jurabib commands.
- * We can't put these into known_latex_commands because the argument order
- * is reversed in lyx if there are 2 arguments.
* No starred form other than "cite*" known.
*/
char const * const known_jurabib_commands[] = { "cite", "citet", "citep",
// "fullcite",
// "footcite", "footcitet", "footcitep", "footcitealt", "footcitealp",
// "footciteauthor", "footciteyear", "footciteyearpar",
-"citefield", "citetitle", "cite*", 0 };
+"citefield", "citetitle", 0 };
/// LaTeX names for quotes
char const * const known_quotes[] = { "dq", "guillemotleft", "flqq", "og",
"small", "normalsize", "large", "Large", "LARGE", "huge", "Huge", 0};
/// the same as known_sizes with .lyx names
-char const * const known_coded_sizes[] = { "default", "tiny", "scriptsize", "footnotesize",
-"small", "normal", "large", "larger", "largest", "huge", "giant", 0};
+char const * const known_coded_sizes[] = { "tiny", "scriptsize", "footnotesize",
+"small", "normal", "large", "larger", "largest", "huge", "giant", 0};
/// LaTeX 2.09 names for font families
char const * const known_old_font_families[] = { "rm", "sf", "tt", 0};
/// spaces known by InsetSpace
char const * const known_spaces[] = { " ", "space", ",", "thinspace", "quad",
-"qquad", "enspace", "enskip", "negthinspace", 0};
+"qquad", "enspace", "enskip", "negthinspace", "hfill", "dotfill", "hrulefill",
+"leftarrowfill", "rightarrowfill", "upbracefill", "downbracefill", 0};
/// the same as known_spaces with .lyx names
char const * const known_coded_spaces[] = { "space{}", "space{}",
"thinspace{}", "thinspace{}", "quad{}", "qquad{}", "enspace{}", "enskip{}",
-"negthinspace{}", 0};
+"negthinspace{}", "hfill{}", "dotfill{}", "hrulefill{}", "leftarrowfill{}",
+"rightarrowfill{}", "upbracefill{}", "downbracefill{}", 0};
+
+/// These are translated by LyX to commands like "\\LyX{}", so we have to put
+/// them in ERT. "LaTeXe" must come before "LaTeX"!
+char const * const known_phrases[] = {"LyX", "TeX", "LaTeXe", "LaTeX", 0};
+char const * const known_coded_phrases[] = {"LyX", "TeX", "LaTeX2e", "LaTeX", 0};
+int const known_phrase_lengths[] = {3, 5, 7, 0};
-/// splits "x=z, y=b" into a map
-map<string, string> split_map(string const & s)
+/// splits "x=z, y=b" into a map and an ordered keyword vector
+void split_map(string const & s, map<string, string> & res, vector<string> & keys)
{
- map<string, string> res;
vector<string> v;
split(s, v);
+ res.clear();
+ keys.resize(v.size());
for (size_t i = 0; i < v.size(); ++i) {
size_t const pos = v[i].find('=');
- string const index = v[i].substr(0, pos);
- string const value = v[i].substr(pos + 1, string::npos);
- res[trim(index)] = trim(value);
+ string const index = trim(v[i].substr(0, pos));
+ string const value = trim(v[i].substr(pos + 1, string::npos));
+ res[index] = value;
+ keys[i] = index;
}
- return res;
}
}
-/// A simple function to translate a latex length to something lyx can
+/// A simple function to translate a latex length to something LyX can
/// understand. Not perfect, but rather best-effort.
bool translate_len(string const & length, string & valstring, string & unit)
{
string find_file(string const & name, string const & path,
char const * const * extensions)
{
- // FIXME UNICODE encoding of name and path may be wrong (makeAbsPath
- // expects utf8)
for (char const * const * what = extensions; *what; ++what) {
string const trial = addExtension(name, *what);
if (makeAbsPath(trial, path).exists())
}
+void begin_command_inset(ostream & os, string const & name,
+ string const & latexname)
+{
+ begin_inset(os, "CommandInset ");
+ os << name << "\nLatexCommand " << latexname << '\n';
+}
+
+
void end_inset(ostream & os)
{
os << "\n\\end_inset\n\n";
}
-void skip_braces(Parser & p)
+bool skip_braces(Parser & p)
{
if (p.next_token().cat() != catBegin)
- return;
+ return false;
p.get_token();
if (p.next_token().cat() == catEnd) {
p.get_token();
- return;
+ return true;
}
p.putback();
+ return false;
+}
+
+
+/// replace LaTeX commands in \p s from the unicodesymbols file with their
+/// unicode points
+docstring convert_unicodesymbols(docstring s)
+{
+ odocstringstream os;
+ for (size_t i = 0; i < s.size();) {
+ if (s[i] != '\\') {
+ os << s[i++];
+ continue;
+ }
+ s = s.substr(i);
+ docstring rem;
+ docstring parsed = encodings.fromLaTeXCommand(s, rem,
+ Encodings::TEXT_CMD);
+ os << parsed;
+ s = rem;
+ if (s.empty() || s[0] != '\\')
+ i = 0;
+ else
+ i = 1;
+ }
+ return os.str();
+}
+
+
+/// try to convert \p s to a valid InsetCommand argument
+string convert_command_inset_arg(string s)
+{
+ if (isAscii(s))
+ // since we don't know the input encoding we can't use from_utf8
+ s = to_utf8(convert_unicodesymbols(from_ascii(s)));
+ // LyX cannot handle newlines in a latex command
+ return subst(s, "\n", " ");
+}
+
+
+void handle_backslash(ostream & os, string const & s)
+{
+ for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
+ if (*it == '\\')
+ os << "\n\\backslash\n";
+ else
+ os << *it;
+ }
}
begin_inset(os, "ERT");
os << "\nstatus collapsed\n";
newcontext.check_layout(os);
- for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
- if (*it == '\\')
- os << "\n\\backslash\n";
- else
- os << *it;
- }
+ handle_backslash(os, s);
// make sure that our comment is the last thing on the line
newcontext.new_paragraph(os);
newcontext.check_layout(os);
}
-Layout const * findLayout(TextClass const & textclass, string const & name)
+Layout const * findLayout(TextClass const & textclass, string const & name, bool command)
+{
+ Layout const * layout = findLayoutWithoutModule(textclass, name, command);
+ if (layout)
+ return layout;
+ if (checkModule(name, command))
+ return findLayoutWithoutModule(textclass, name, command);
+ return layout;
+}
+
+
+InsetLayout const * findInsetLayout(TextClass const & textclass, string const & name, bool command)
{
- DocumentClass::const_iterator lit = textclass.begin();
- DocumentClass::const_iterator len = textclass.end();
- for (; lit != len; ++lit)
- if (lit->latexname() == name)
- return &*lit;
- return 0;
+ InsetLayout const * insetlayout = findInsetLayoutWithoutModule(textclass, name, command);
+ if (insetlayout)
+ return insetlayout;
+ if (checkModule(name, command))
+ return findInsetLayoutWithoutModule(textclass, name, command);
+ return insetlayout;
}
void eat_whitespace(Parser &, ostream &, Context &, bool);
-Layout * captionlayout()
+/*!
+ * Skips whitespace and braces.
+ * This should be called after a command has been parsed that is not put into
+ * ERT, and where LyX adds "{}" if needed.
+ */
+void skip_spaces_braces(Parser & p, bool keepws = false)
{
- static Layout * lay = 0;
- if (!lay) {
- lay = new Layout;
- lay->name_ = from_ascii("Caption");
- lay->latexname_ = "caption";
- lay->latextype = LATEX_COMMAND;
- lay->optionalargs = 1;
- }
- return lay;
+ /* The following four examples produce the same typeset output and
+ should be handled by this function:
+ - abc \j{} xyz
+ - abc \j {} xyz
+ - abc \j
+ {} xyz
+ - abc \j %comment
+ {} xyz
+ */
+ // Unfortunately we need to skip comments, too.
+ // We can't use eat_whitespace since writing them after the {}
+ // results in different output in some cases.
+ bool const skipped_spaces = p.skip_spaces(true);
+ bool const skipped_braces = skip_braces(p);
+ if (keepws && skipped_spaces && !skipped_braces)
+ // put back the space (it is better handled by check_space)
+ p.unskip_spaces(true);
}
Context & parent_context,
Layout const * newlayout)
{
+ TeXFont const oldFont = parent_context.font;
+ // save the current font size
+ string const size = oldFont.size;
+ // reset the font size to default, because the font size switches
+ // don't affect section headings and the like
+ parent_context.font.size = Context::normalfont.size;
+ // we only need to write the font change if we have an open layout
+ if (!parent_context.atParagraphStart())
+ output_font_change(os, oldFont, parent_context.font);
parent_context.check_end_layout(os);
Context context(true, parent_context.textclass, newlayout,
parent_context.layout, parent_context.font);
}
context.check_deeper(os);
context.check_layout(os);
- if (context.layout->optionalargs > 0) {
+ unsigned int optargs = 0;
+ while (optargs < context.layout->optargs) {
eat_whitespace(p, os, context, false);
- if (p.next_token().character() == '[') {
- p.get_token(); // eat '['
- begin_inset(os, "OptArg\n");
- os << "status collapsed\n\n";
- parse_text_in_inset(p, os, FLAG_BRACK_LAST, outer, context);
- end_inset(os);
- eat_whitespace(p, os, context, false);
- }
+ if (p.next_token().cat() == catEscape ||
+ p.next_token().character() != '[')
+ break;
+ p.get_token(); // eat '['
+ begin_inset(os, "OptArg\n");
+ os << "status collapsed\n\n";
+ parse_text_in_inset(p, os, FLAG_BRACK_LAST, outer, context);
+ end_inset(os);
+ eat_whitespace(p, os, context, false);
+ ++optargs;
+ }
+ unsigned int reqargs = 0;
+ while (LYX_FORMAT >= 392 && reqargs < context.layout->reqargs) {
+ eat_whitespace(p, os, context, false);
+ if (p.next_token().cat() != catBegin)
+ break;
+ p.get_token(); // eat '{'
+ begin_inset(os, "OptArg\n");
+ os << "status collapsed\n\n";
+ parse_text_in_inset(p, os, FLAG_BRACE_LAST, outer, context);
+ end_inset(os);
+ eat_whitespace(p, os, context, false);
+ ++reqargs;
}
parse_text(p, os, FLAG_ITEM, outer, context);
context.check_end_layout(os);
// We don't need really a new paragraph, but
// we must make sure that the next item gets a \begin_layout.
parent_context.new_paragraph(os);
+ // Set the font size to the original value. No need to output it here
+ // (Context::begin_layout() will do that if needed)
+ parent_context.font.size = size;
}
* The drawback is that the logic inside the function becomes
* complicated, and that is the reason why it is not implemented.
*/
-void check_space(Parser const & p, ostream & os, Context & context)
+void check_space(Parser & p, ostream & os, Context & context)
{
Token const next = p.next_token();
Token const curr = p.curr_token();
parse_text(p, os, FLAG_ITEM, outer, context);
ert = "}";
break;
+ case item:
+ // This argument consists only of a single item.
+ // The presence of '{' or not must be preserved.
+ p.skip_spaces();
+ if (p.next_token().cat() == catBegin)
+ ert += '{' + p.verbatim_item() + '}';
+ else
+ ert += p.verbatim_item();
+ break;
case verbatim:
// This argument may contain special characters
ert += '{' + p.verbatim_item() + '}';
break;
case optional:
- ert += p.getOpt();
+ // true because we must not eat whitespace
+ // if an optional arg follows, we must not strip the
+ // brackets from this one
+ if (i < no_arguments - 1 &&
+ template_arguments[i+1] == optional)
+ ert += p.getFullOpt(true);
+ else
+ ert += p.getOpt(true);
break;
}
}
/// Parses a minipage or parbox
-void parse_box(Parser & p, ostream & os, unsigned flags, bool outer,
- Context & parent_context, bool use_parbox)
+void parse_box(Parser & p, ostream & os, unsigned outer_flags,
+ unsigned inner_flags, bool outer, Context & parent_context,
+ string const & outer_type, string const & special,
+ string const & inner_type)
{
string position;
string inner_pos;
+ string hor_pos = "c";
// We need to set the height to the LaTeX default of 1\\totalheight
// for the case when no height argument is given
string height_value = "1";
string height_unit = "in";
string height_special = "totalheight";
string latex_height;
- if (p.next_token().asInput() == "[") {
+ if (!inner_type.empty() && p.hasOpt()) {
position = p.getArg('[', ']');
if (position != "t" && position != "c" && position != "b") {
+ cerr << "invalid position " << position << " for "
+ << inner_type << endl;
position = "c";
- cerr << "invalid position for minipage/parbox" << endl;
}
- if (p.next_token().asInput() == "[") {
+ if (p.hasOpt()) {
latex_height = p.getArg('[', ']');
translate_box_len(latex_height, height_value, height_unit, height_special);
- if (p.next_token().asInput() == "[") {
+ if (p.hasOpt()) {
inner_pos = p.getArg('[', ']');
if (inner_pos != "c" && inner_pos != "t" &&
inner_pos != "b" && inner_pos != "s") {
+ cerr << "invalid inner_pos "
+ << inner_pos << " for "
+ << inner_type << endl;
inner_pos = position;
- cerr << "invalid inner_pos for minipage/parbox"
- << endl;
}
}
}
}
string width_value;
string width_unit;
- string const latex_width = p.verbatim_item();
+ string latex_width;
+ if (inner_type.empty()) {
+ if (special.empty())
+ latex_width = "\\columnwidth";
+ else {
+ Parser p2(special);
+ latex_width = p2.getArg('[', ']');
+ string const opt = p2.getArg('[', ']');
+ if (!opt.empty()) {
+ hor_pos = opt;
+ if (hor_pos != "l" && hor_pos != "c" &&
+ hor_pos != "r") {
+ cerr << "invalid hor_pos " << hor_pos
+ << " for " << outer_type << endl;
+ hor_pos = "c";
+ }
+ }
+ }
+ } else
+ latex_width = p.verbatim_item();
translate_len(latex_width, width_value, width_unit);
- if (contains(width_unit, '\\') || contains(height_unit, '\\')) {
- // LyX can't handle length variables
- ostringstream ss;
- if (use_parbox)
- ss << "\\parbox";
+ // LyX can't handle length variables
+ bool use_ert = contains(width_unit, '\\') || contains(height_unit, '\\');
+ if (!use_ert && !outer_type.empty() && !inner_type.empty()) {
+ // Look whether there is some content after the end of the
+ // inner box, but before the end of the outer box.
+ // If yes, we need to output ERT.
+ p.pushPosition();
+ if (inner_flags & FLAG_END)
+ p.verbatimEnvironment(inner_type);
else
- ss << "\\begin{minipage}";
- if (!position.empty())
- ss << '[' << position << ']';
- if (!latex_height.empty())
- ss << '[' << latex_height << ']';
- if (!inner_pos.empty())
- ss << '[' << inner_pos << ']';
- ss << "{" << latex_width << "}";
- if (use_parbox)
- ss << '{';
+ p.verbatim_item();
+ p.skip_spaces(true);
+ if ((outer_type == "framed" && p.next_token().asInput() != "\\end") ||
+ (outer_type != "framed" && p.next_token().cat() != catEnd)) {
+ // something is between the end of the inner box and
+ // the end of the outer box, so we need to use ERT.
+ use_ert = true;
+ }
+ p.popPosition();
+ }
+ if (use_ert) {
+ ostringstream ss;
+ if (!outer_type.empty()) {
+ if (outer_flags & FLAG_END)
+ ss << "\\begin{" << outer_type << '}';
+ else {
+ ss << '\\' << outer_type << '{';
+ if (!special.empty())
+ ss << special;
+ }
+ }
+ if (!inner_type.empty()) {
+ if (inner_flags & FLAG_END)
+ ss << "\\begin{" << inner_type << '}';
+ else
+ ss << '\\' << inner_type;
+ if (!position.empty())
+ ss << '[' << position << ']';
+ if (!latex_height.empty())
+ ss << '[' << latex_height << ']';
+ if (!inner_pos.empty())
+ ss << '[' << inner_pos << ']';
+ ss << '{' << latex_width << '}';
+ if (!(inner_flags & FLAG_END))
+ ss << '{';
+ }
handle_ert(os, ss.str(), parent_context);
- parent_context.new_paragraph(os);
- parse_text_in_inset(p, os, flags, outer, parent_context);
- if (use_parbox)
- handle_ert(os, "}", parent_context);
- else
- handle_ert(os, "\\end{minipage}", parent_context);
+ if (!inner_type.empty()) {
+ parse_text(p, os, inner_flags, outer, parent_context);
+ if (inner_flags & FLAG_END)
+ handle_ert(os, "\\end{" + inner_type + '}',
+ parent_context);
+ else
+ handle_ert(os, "}", parent_context);
+ }
+ if (!outer_type.empty()) {
+ parse_text(p, os, outer_flags, outer, parent_context);
+ if (outer_flags & FLAG_END)
+ handle_ert(os, "\\end{" + outer_type + '}',
+ parent_context);
+ else
+ handle_ert(os, "}", parent_context);
+ }
} else {
// LyX does not like empty positions, so we have
// to set them to the LaTeX default values here.
if (inner_pos.empty())
inner_pos = position;
parent_context.check_layout(os);
- begin_inset(os, "Box Frameless\n");
+ begin_inset(os, "Box ");
+ if (outer_type == "framed")
+ os << "Framed\n";
+ else if (outer_type == "framebox")
+ os << "Boxed\n";
+ else if (outer_type == "shadowbox")
+ os << "Shadowbox\n";
+ else if (outer_type == "shaded")
+ os << "Shaded\n";
+ else if (outer_type == "doublebox")
+ os << "Doublebox\n";
+ else if (outer_type.empty())
+ os << "Frameless\n";
+ else
+ os << outer_type << '\n';
os << "position \"" << position << "\"\n";
- os << "hor_pos \"c\"\n";
- os << "has_inner_box 1\n";
+ os << "hor_pos \"" << hor_pos << "\"\n";
+ os << "has_inner_box " << !inner_type.empty() << "\n";
os << "inner_pos \"" << inner_pos << "\"\n";
- os << "use_parbox " << use_parbox << "\n";
+ os << "use_parbox " << (inner_type == "parbox") << '\n';
os << "width \"" << width_value << width_unit << "\"\n";
os << "special \"none\"\n";
os << "height \"" << height_value << height_unit << "\"\n";
os << "height_special \"" << height_special << "\"\n";
os << "status open\n\n";
- parse_text_in_inset(p, os, flags, outer, parent_context);
+ Context context(true, parent_context.textclass);
+ context.font = parent_context.font;
+
+ // If we have no inner box the contents will be read with the outer box
+ if (!inner_type.empty())
+ parse_text(p, os, inner_flags, outer, context);
+
+ // Ensure that the end of the outer box is parsed correctly:
+ // The opening brace has been eaten by parse_outer_box()
+ if (!outer_type.empty() && (outer_flags & FLAG_ITEM)) {
+ outer_flags &= ~FLAG_ITEM;
+ outer_flags |= FLAG_BRACE_LAST;
+ }
+
+ // Find end of outer box, output contents if inner_type is
+ // empty and output possible comments
+ if (!outer_type.empty()) {
+ // This does not output anything but comments if
+ // inner_type is not empty (see use_ert)
+ parse_text(p, os, outer_flags, outer, context);
+ }
+
+ context.check_end_layout(os);
end_inset(os);
#ifdef PRESERVE_LAYOUT
- // lyx puts a % after the end of the minipage
+ // LyX puts a % after the end of the minipage
if (p.next_token().cat() == catNewline && p.next_token().cs().size() > 1) {
// new paragraph
//handle_comment(os, "%dummy", parent_context);
p.skip_spaces();
// We add a protected space if something real follows
if (p.good() && p.next_token().cat() != catComment) {
- os << "\\InsetSpace ~\n";
+ begin_inset(os, "space ~\n");
+ end_inset(os);
}
}
#endif
}
+void parse_outer_box(Parser & p, ostream & os, unsigned flags, bool outer,
+ Context & parent_context, string const & outer_type,
+ string const & special)
+{
+ eat_whitespace(p, os, parent_context, false);
+ if (flags & FLAG_ITEM) {
+ // Eat '{'
+ if (p.next_token().cat() == catBegin)
+ p.get_token();
+ else
+ cerr << "Warning: Ignoring missing '{' after \\"
+ << outer_type << '.' << endl;
+ eat_whitespace(p, os, parent_context, false);
+ }
+ string inner;
+ unsigned int inner_flags = 0;
+ if (outer_type == "shaded") {
+ // These boxes never have an inner box
+ ;
+ } else if (p.next_token().asInput() == "\\parbox") {
+ inner = p.get_token().cs();
+ inner_flags = FLAG_ITEM;
+ } else if (p.next_token().asInput() == "\\begin") {
+ // Is this a minipage?
+ p.pushPosition();
+ p.get_token();
+ inner = p.getArg('{', '}');
+ p.popPosition();
+ if (inner == "minipage") {
+ p.get_token();
+ p.getArg('{', '}');
+ eat_whitespace(p, os, parent_context, false);
+ inner_flags = FLAG_END;
+ } else
+ inner = "";
+ }
+ if (inner_flags == FLAG_END) {
+ active_environments.push_back(inner);
+ parse_box(p, os, flags, FLAG_END, outer, parent_context,
+ outer_type, special, inner);
+ active_environments.pop_back();
+ } else {
+ parse_box(p, os, flags, inner_flags, outer, parent_context,
+ outer_type, special, inner);
+ }
+}
+
+
+void parse_listings(Parser & p, ostream & os, Context & parent_context)
+{
+ parent_context.check_layout(os);
+ begin_inset(os, "listings\n");
+ os << "inline false\n"
+ << "status collapsed\n";
+ Context context(true, parent_context.textclass);
+ context.layout = &parent_context.textclass.plainLayout();
+ context.check_layout(os);
+ string const s = p.verbatimEnvironment("lstlisting");
+ for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
+ if (*it == '\\')
+ os << "\n\\backslash\n";
+ else if (*it == '\n') {
+ // avoid adding an empty paragraph at the end
+ if (it + 1 != et) {
+ context.new_paragraph(os);
+ context.check_layout(os);
+ }
+ } else
+ os << *it;
+ }
+ context.check_end_layout(os);
+ end_inset(os);
+}
+
+
/// parse an unknown environment
void parse_unknown_environment(Parser & p, string const & name, ostream & os,
unsigned flags, bool outer,
void parse_environment(Parser & p, ostream & os, bool outer,
- Context & parent_context)
+ string & last_env, Context & parent_context)
{
Layout const * newlayout;
+ InsetLayout const * newinsetlayout = 0;
string const name = p.getArg('{', '}');
const bool is_starred = suffixIs(name, '*');
string const unstarred_name = rtrim(name, "*");
eat_whitespace(p, os, parent_context, false);
parent_context.check_layout(os);
begin_inset(os, "Float " + unstarred_name + "\n");
- if (p.next_token().asInput() == "[") {
+ if (p.hasOpt())
os << "placement " << p.getArg('[', ']') << '\n';
- }
os << "wide " << convert<string>(is_starred)
<< "\nsideways false"
<< "\nstatus open\n\n";
else if (name == "minipage") {
eat_whitespace(p, os, parent_context, false);
- parse_box(p, os, FLAG_END, outer, parent_context, false);
+ parse_box(p, os, 0, FLAG_END, outer, parent_context, "", "", name);
p.skip_spaces();
}
p.skip_spaces();
}
- else if (name == "framed") {
+ else if (name == "framed" || name == "shaded") {
eat_whitespace(p, os, parent_context, false);
- parent_context.check_layout(os);
- begin_inset(os, "Note Framed\n");
- os << "status open\n";
- parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
- end_inset(os);
+ parse_outer_box(p, os, FLAG_END, outer, parent_context, name, "");
p.skip_spaces();
}
- else if (name == "shaded") {
+ else if (name == "lstlisting") {
eat_whitespace(p, os, parent_context, false);
- parent_context.check_layout(os);
- begin_inset(os, "Note Shaded\n");
- os << "status open\n";
- parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
- end_inset(os);
+ // FIXME handle listings with parameters
+ if (p.hasOpt())
+ parse_unknown_environment(p, name, os, FLAG_END,
+ outer, parent_context);
+ else
+ parse_listings(p, os, parent_context);
p.skip_spaces();
}
parse_unknown_environment(p, name, os, FLAG_END, outer,
parent_context);
- // Alignment settings
- else if (name == "center" || name == "flushleft" || name == "flushright" ||
- name == "centering" || name == "raggedright" || name == "raggedleft") {
+ // Alignment and spacing settings
+ // FIXME (bug xxxx): These settings can span multiple paragraphs and
+ // therefore are totally broken!
+ // Note that \centering, raggedright, and raggedleft cannot be handled, as
+ // they are commands not environments. They are furthermore switches that
+ // can be ended by another switches, but also by commands like \footnote or
+ // \parbox. So the only safe way is to leave them untouched.
+ else if (name == "center" || name == "centering" ||
+ name == "flushleft" || name == "flushright" ||
+ name == "singlespace" || name == "onehalfspace" ||
+ name == "doublespace" || name == "spacing") {
eat_whitespace(p, os, parent_context, false);
// We must begin a new paragraph if not already done
if (! parent_context.atParagraphStart()) {
parent_context.check_end_layout(os);
parent_context.new_paragraph(os);
}
- if (name == "flushleft" || name == "raggedright")
+ if (name == "flushleft")
parent_context.add_extra_stuff("\\align left\n");
- else if (name == "flushright" || name == "raggedleft")
+ else if (name == "flushright")
parent_context.add_extra_stuff("\\align right\n");
- else
+ else if (name == "center" || name == "centering")
parent_context.add_extra_stuff("\\align center\n");
+ else if (name == "singlespace")
+ parent_context.add_extra_stuff("\\paragraph_spacing single\n");
+ else if (name == "onehalfspace")
+ parent_context.add_extra_stuff("\\paragraph_spacing onehalf\n");
+ else if (name == "doublespace")
+ parent_context.add_extra_stuff("\\paragraph_spacing double\n");
+ else if (name == "spacing")
+ parent_context.add_extra_stuff("\\paragraph_spacing other " + p.verbatim_item() + "\n");
parse_text(p, os, FLAG_END, outer, parent_context);
- // Just in case the environment is empty ..
+ // Just in case the environment is empty
parent_context.extra_stuff.erase();
// We must begin a new paragraph to reset the alignment
parent_context.new_paragraph(os);
}
// The single '=' is meant here.
- else if ((newlayout = findLayout(parent_context.textclass, name)) &&
- newlayout->isEnvironment()) {
+ else if ((newlayout = findLayout(parent_context.textclass, name, false))) {
eat_whitespace(p, os, parent_context, false);
Context context(true, parent_context.textclass, newlayout,
parent_context.layout, parent_context.font);
context.need_end_deeper = true;
}
parent_context.check_end_layout(os);
+ if (last_env == name) {
+ // we need to output a separator since LyX would export
+ // the two environments as one otherwise (bug 5716)
+ docstring const sep = from_ascii("--Separator--");
+ TeX2LyXDocClass const & textclass(parent_context.textclass);
+ if (textclass.hasLayout(sep)) {
+ Context newcontext(parent_context);
+ newcontext.layout = &(textclass[sep]);
+ newcontext.check_layout(os);
+ newcontext.check_end_layout(os);
+ } else {
+ parent_context.check_layout(os);
+ begin_inset(os, "Note Note\n");
+ os << "status closed\n";
+ Context newcontext(true, textclass,
+ &(textclass.defaultLayout()));
+ newcontext.check_layout(os);
+ newcontext.check_end_layout(os);
+ end_inset(os);
+ parent_context.check_end_layout(os);
+ }
+ }
switch (context.layout->latextype) {
case LATEX_LIST_ENVIRONMENT:
- context.extra_stuff = "\\labelwidthstring "
- + p.verbatim_item() + '\n';
+ context.add_par_extra_stuff("\\labelwidthstring "
+ + p.verbatim_item() + '\n');
p.skip_spaces();
break;
case LATEX_BIB_ENVIRONMENT:
p.skip_spaces();
}
+ // The single '=' is meant here.
+ else if ((newinsetlayout = findInsetLayout(parent_context.textclass, name, false))) {
+ eat_whitespace(p, os, parent_context, false);
+ parent_context.check_layout(os);
+ begin_inset(os, "Flex ");
+ os << to_utf8(newinsetlayout->name()) << '\n'
+ << "status collapsed\n";
+ parse_text_in_inset(p, os, FLAG_END, false, parent_context, newinsetlayout);
+ end_inset(os);
+ }
+
else if (name == "appendix") {
// This is no good latex style, but it works and is used in some documents...
eat_whitespace(p, os, parent_context, false);
parse_unknown_environment(p, name, os, FLAG_END, outer,
parent_context);
+ last_env = name;
active_environments.pop_back();
}
/// convention (relative to .lyx file) if it is relative
void fix_relative_filename(string & name)
{
- FileName fname(name);
- if (fname.isAbsolute())
+ if (FileName::isAbsolute(name))
return;
- // FIXME UNICODE encoding of name may be wrong (makeAbsPath expects
- // utf8)
- name = to_utf8(makeRelPath(from_utf8(makeAbsPath(name, getMasterFilePath()).absFilename()),
+ name = to_utf8(makeRelPath(from_utf8(makeAbsPath(name, getMasterFilePath()).absFileName()),
from_utf8(getParentFilePath())));
}
// at all.
if (t.cat() == catEscape)
os << subst(t.asInput(), "\\", "\n\\backslash\n");
- else
- os << subst(t.asInput(), "\n", "\n\\newline\n");
+ else {
+ ostringstream oss;
+ begin_inset(oss, "Newline newline");
+ end_inset(oss);
+ os << subst(t.asInput(), "\n", oss.str());
+ }
// The scrap chunk is ended by an @ at the beginning of a line.
// After the @ the line may contain a comment and/or
// whitespace, but nothing else.
newcontext.check_end_layout(os);
}
+
+/// detects \\def, \\long\\def and \\global\\long\\def with ws and comments
+bool is_macro(Parser & p)
+{
+ Token first = p.curr_token();
+ if (first.cat() != catEscape || !p.good())
+ return false;
+ if (first.cs() == "def")
+ return true;
+ if (first.cs() != "global" && first.cs() != "long")
+ return false;
+ Token second = p.get_token();
+ int pos = 1;
+ while (p.good() && !p.isParagraph() && (second.cat() == catSpace ||
+ second.cat() == catNewline || second.cat() == catComment)) {
+ second = p.get_token();
+ pos++;
+ }
+ bool secondvalid = second.cat() == catEscape;
+ Token third;
+ bool thirdvalid = false;
+ if (p.good() && first.cs() == "global" && secondvalid &&
+ second.cs() == "long") {
+ third = p.get_token();
+ pos++;
+ while (p.good() && !p.isParagraph() &&
+ (third.cat() == catSpace ||
+ third.cat() == catNewline ||
+ third.cat() == catComment)) {
+ third = p.get_token();
+ pos++;
+ }
+ thirdvalid = third.cat() == catEscape;
+ }
+ for (int i = 0; i < pos; ++i)
+ p.putback();
+ if (!secondvalid)
+ return false;
+ if (!thirdvalid)
+ return (first.cs() == "global" || first.cs() == "long") &&
+ second.cs() == "def";
+ return first.cs() == "global" && second.cs() == "long" &&
+ third.cs() == "def";
+}
+
+
+/// Parse a macro definition (assumes that is_macro() returned true)
+void parse_macro(Parser & p, ostream & os, Context & context)
+{
+ context.check_layout(os);
+ Token first = p.curr_token();
+ Token second;
+ Token third;
+ string command = first.asInput();
+ if (first.cs() != "def") {
+ p.get_token();
+ eat_whitespace(p, os, context, false);
+ second = p.curr_token();
+ command += second.asInput();
+ if (second.cs() != "def") {
+ p.get_token();
+ eat_whitespace(p, os, context, false);
+ third = p.curr_token();
+ command += third.asInput();
+ }
+ }
+ eat_whitespace(p, os, context, false);
+ string const name = p.get_token().cs();
+ eat_whitespace(p, os, context, false);
+
+ // parameter text
+ bool simple = true;
+ string paramtext;
+ int arity = 0;
+ while (p.next_token().cat() != catBegin) {
+ if (p.next_token().cat() == catParameter) {
+ // # found
+ p.get_token();
+ paramtext += "#";
+
+ // followed by number?
+ if (p.next_token().cat() == catOther) {
+ char c = p.getChar();
+ paramtext += c;
+ // number = current arity + 1?
+ if (c == arity + '0' + 1)
+ ++arity;
+ else
+ simple = false;
+ } else
+ paramtext += p.get_token().cs();
+ } else {
+ paramtext += p.get_token().cs();
+ simple = false;
+ }
+ }
+
+ // only output simple (i.e. compatible) macro as FormulaMacros
+ string ert = '\\' + name + ' ' + paramtext + '{' + p.verbatim_item() + '}';
+ if (simple) {
+ context.check_layout(os);
+ begin_inset(os, "FormulaMacro");
+ os << "\n\\def" << ert;
+ end_inset(os);
+ } else
+ handle_ert(os, command + ert, context);
+}
+
} // anonymous namespace
Context & context)
{
Layout const * newlayout = 0;
- // store the current selectlanguage to be used after \foreignlanguage
- string selectlang;
- // Store the latest bibliographystyle (needed for bibtex inset)
+ InsetLayout const * newinsetlayout = 0;
+ // Store the latest bibliographystyle and nocite{*} option
+ // (needed for bibtex inset)
+ string btprint;
string bibliographystyle;
bool const use_natbib = used_packages.find("natbib") != used_packages.end();
bool const use_jurabib = used_packages.find("jurabib") != used_packages.end();
+ string last_env;
while (p.good()) {
Token const & t = p.get_token();
#ifdef FILEDEBUG
- cerr << "t: " << t << " flags: " << flags << "\n";
+ debugToken(cerr, t, flags);
#endif
if (flags & FLAG_ITEM) {
flags |= FLAG_LEAVE;
}
- if (t.character() == ']' && (flags & FLAG_BRACK_LAST))
+ if (t.cat() != catEscape && t.character() == ']' &&
+ (flags & FLAG_BRACK_LAST))
+ return;
+ if (t.cat() == catEnd && (flags & FLAG_BRACE_LAST))
return;
+ // If there is anything between \end{env} and \begin{env} we
+ // don't need to output a separator.
+ if (t.cat() != catSpace && t.cat() != catNewline &&
+ t.asInput() != "\\begin")
+ last_env = "";
+
//
// cat codes
//
handle_ert(os, s, context);
}
- else if (t.cat() == catLetter ||
- t.cat() == catOther ||
+ else if (t.cat() == catLetter) {
+ context.check_layout(os);
+ // Workaround for bug 4752.
+ // FIXME: This whole code block needs to be removed
+ // when the bug is fixed and tex2lyx produces
+ // the updated file format.
+ // The replacement algorithm in LyX is so stupid that
+ // it even translates a phrase if it is part of a word.
+ bool handled = false;
+ for (int const * l = known_phrase_lengths; *l; ++l) {
+ string phrase = t.cs();
+ for (int i = 1; i < *l && p.next_token().isAlnumASCII(); ++i)
+ phrase += p.get_token().cs();
+ if (is_known(phrase, known_coded_phrases)) {
+ handle_ert(os, phrase, context);
+ handled = true;
+ break;
+ } else {
+ for (size_t i = 1; i < phrase.length(); ++i)
+ p.putback();
+ }
+ }
+ if (!handled)
+ os << t.cs();
+ }
+
+ else if (t.cat() == catOther ||
t.cat() == catAlign ||
t.cat() == catParameter) {
// This translates "&" to "\\&" which may be wrong...
context.check_layout(os);
- os << t.character();
+ os << t.cs();
}
else if (p.isParagraph()) {
if (t.character() == '~') {
if (context.layout->free_spacing)
os << ' ';
- else
- os << "\\InsetSpace ~\n";
+ else {
+ begin_inset(os, "space ~\n");
+ end_inset(os);
+ }
} else
- os << t.character();
+ os << t.cs();
}
else if (t.cat() == catBegin &&
next.character() == '*') {
p.get_token();
if (p.next_token().cat() == catEnd) {
- os << next.character();
+ os << next.cs();
p.get_token();
} else {
p.putback();
}
else if (t.cs() == "begin")
- parse_environment(p, os, outer, context);
+ parse_environment(p, os, outer, last_env, context);
else if (t.cs() == "end") {
if (flags & FLAG_END) {
p.skip_spaces();
string s;
bool optarg = false;
- if (p.next_token().character() == '[') {
+ if (p.next_token().cat() != catEscape &&
+ p.next_token().character() == '[') {
p.get_token(); // eat '['
s = parse_text_snippet(p, FLAG_BRACK_LAST,
outer, context);
}
if (optarg) {
if (context.layout->labeltype != LABEL_MANUAL) {
- // lyx does not support \item[\mybullet]
+ // LyX does not support \item[\mybullet]
// in itemize environments
handle_ert(os, "[", context);
os << s;
else if (t.cs() == "bibitem") {
context.set_item();
context.check_layout(os);
- os << "\\bibitem ";
- os << p.getOpt();
- os << '{' << p.verbatim_item() << '}' << "\n";
- }
-
- else if(t.cs() == "global") {
- // skip global which can appear in front of e.g. "def"
- }
-
- else if (t.cs() == "def") {
- context.check_layout(os);
- eat_whitespace(p, os, context, false);
- string name = p.get_token().cs();
- eat_whitespace(p, os, context, false);
-
- // parameter text
- bool simple = true;
- string paramtext;
- int arity = 0;
- while (p.next_token().cat() != catBegin) {
- if (p.next_token().cat() == catParameter) {
- // # found
- p.get_token();
- paramtext += "#";
-
- // followed by number?
- if (p.next_token().cat() == catOther) {
- char c = p.getChar();
- paramtext += c;
- // number = current arity + 1?
- if (c == arity + '0' + 1)
- ++arity;
- else
- simple = false;
- } else
- paramtext += p.get_token().asString();
- } else {
- paramtext += p.get_token().asString();
- simple = false;
- }
- }
-
- // only output simple (i.e. compatible) macro as FormulaMacros
- string ert = "\\def\\" + name + ' ' + paramtext + '{' + p.verbatim_item() + '}';
- if (simple) {
- context.check_layout(os);
- begin_inset(os, "FormulaMacro");
- os << "\n" << ert;
+ string label = convert_command_inset_arg(p.getArg('[', ']'));
+ string key = convert_command_inset_arg(p.verbatim_item());
+ if (contains(label, '\\') || contains(key, '\\')) {
+ // LyX can't handle LaTeX commands in labels or keys
+ handle_ert(os, t.asInput() + '[' + label +
+ "]{" + p.verbatim_item() + '}',
+ context);
+ } else {
+ begin_command_inset(os, "bibitem", "bibitem");
+ os << "label \"" << label << "\"\n"
+ "key \"" << key << "\"\n";
end_inset(os);
- } else
- handle_ert(os, ert, context);
+ }
}
+ else if (is_macro(p))
+ parse_macro(p, os, context);
+
else if (t.cs() == "noindent") {
p.skip_spaces();
- context.add_extra_stuff("\\noindent\n");
+ context.add_par_extra_stuff("\\noindent\n");
}
else if (t.cs() == "appendix") {
- context.add_extra_stuff("\\start_of_appendix\n");
+ context.add_par_extra_stuff("\\start_of_appendix\n");
// We need to start a new paragraph. Otherwise the
// appendix in 'bla\appendix\chapter{' would start
// too late.
eat_whitespace(p, os, context, true);
}
+ // Starred section headings
// Must attempt to parse "Section*" before "Section".
else if ((p.next_token().asInput() == "*") &&
context.new_layout_allowed &&
- // The single '=' is meant here.
- (newlayout = findLayout(context.textclass, t.cs() + '*')) &&
- newlayout->isCommand()) {
+ (newlayout = findLayout(context.textclass, t.cs() + '*', true))) {
+ // write the layout
p.get_token();
output_command_layout(os, p, outer, context, newlayout);
p.skip_spaces();
}
- // The single '=' is meant here.
+ // Section headings and the like
else if (context.new_layout_allowed &&
- (newlayout = findLayout(context.textclass, t.cs())) &&
- newlayout->isCommand()) {
+ (newlayout = findLayout(context.textclass, t.cs(), true))) {
+ // write the layout
output_command_layout(os, p, outer, context, newlayout);
p.skip_spaces();
}
- // Special handling for \caption
- // FIXME: remove this when InsetCaption is supported.
- else if (context.new_layout_allowed &&
- t.cs() == captionlayout()->latexname()) {
- output_command_layout(os, p, outer, context,
- captionlayout());
+ else if (t.cs() == "caption") {
+ p.skip_spaces();
+ context.check_layout(os);
p.skip_spaces();
+ begin_inset(os, "Caption\n\n");
+ Context newcontext(true, context.textclass);
+ newcontext.font = context.font;
+ newcontext.check_layout(os);
+ if (p.next_token().cat() != catEscape &&
+ p.next_token().character() == '[') {
+ p.get_token(); // eat '['
+ begin_inset(os, "OptArg\n");
+ os << "status collapsed\n";
+ parse_text_in_inset(p, os, FLAG_BRACK_LAST, outer, context);
+ end_inset(os);
+ eat_whitespace(p, os, context, false);
+ }
+ parse_text(p, os, FLAG_ITEM, outer, context);
+ context.check_end_layout(os);
+ // We don't really need a new paragraph, but
+ // we must make sure that the next item gets a \begin_layout.
+ context.new_paragraph(os);
+ end_inset(os);
+ p.skip_spaces();
+ newcontext.check_end_layout(os);
}
else if (t.cs() == "includegraphics") {
bool const clip = p.next_token().asInput() == "*";
if (clip)
p.get_token();
- map<string, string> opts = split_map(p.getArg('[', ']'));
+ string const arg = p.getArg('[', ']');
+ map<string, string> opts;
+ vector<string> keys;
+ split_map(arg, opts, keys);
if (clip)
opts["clip"] = string();
string name = normalize_filename(p.verbatim_item());
string const path = getMasterFilePath();
// We want to preserve relative / absolute filenames,
// therefore path is only used for testing
- // FIXME UNICODE encoding of name and path may be
- // wrong (makeAbsPath expects utf8)
if (!makeAbsPath(name, path).exists()) {
// The file extension is probably missing.
// Now try to find it out.
<< endl;
}
name = dvips_name;
- } else if (!pdftex_name.empty())
+ } else if (!pdftex_name.empty()) {
name = pdftex_name;
+ pdflatex = true;
+ }
}
- // FIXME UNICODE encoding of name and path may be
- // wrong (makeAbsPath expects utf8)
if (makeAbsPath(name, path).exists())
fix_relative_filename(name);
else
val = val*100;
os << "\tscale " << val << '\n';
}
- if (opts.find("angle") != opts.end())
+ if (opts.find("angle") != opts.end()) {
os << "\trotateAngle "
<< opts["angle"] << '\n';
+ vector<string>::const_iterator a =
+ find(keys.begin(), keys.end(), "angle");
+ vector<string>::const_iterator s =
+ find(keys.begin(), keys.end(), "width");
+ if (s == keys.end())
+ s = find(keys.begin(), keys.end(), "height");
+ if (s == keys.end())
+ s = find(keys.begin(), keys.end(), "scale");
+ if (s != keys.end() && distance(s, a) > 0)
+ os << "\tscaleBeforeRotation\n";
+ }
if (opts.find("origin") != opts.end()) {
ostringstream ss;
string const opt = opts["origin"];
p.skip_spaces();
context.check_layout(os);
string const s = p.verbatim_item();
+ //FIXME: this never triggers in UTF8
if (s == "\xb1" || s == "\xb3" || s == "\xb2" || s == "\xb5")
os << s;
else
context);
}
- else if (t.cs() == "hfill") {
- context.check_layout(os);
- os << "\n\\hfill\n";
- skip_braces(p);
- p.skip_spaces();
- }
-
else if (t.cs() == "makeindex" || t.cs() == "maketitle") {
// FIXME: Somehow prevent title layouts if
// "maketitle" was not found
- p.skip_spaces();
- skip_braces(p); // swallow this
+ // swallow this
+ skip_spaces_braces(p);
}
else if (t.cs() == "tableofcontents") {
- p.skip_spaces();
context.check_layout(os);
- begin_inset(os, "LatexCommand \\tableofcontents\n");
+ begin_command_inset(os, "toc", "tableofcontents");
end_inset(os);
- skip_braces(p); // swallow this
+ skip_spaces_braces(p);
}
else if (t.cs() == "listoffigures") {
- p.skip_spaces();
context.check_layout(os);
begin_inset(os, "FloatList figure\n");
end_inset(os);
- skip_braces(p); // swallow this
+ skip_spaces_braces(p);
}
else if (t.cs() == "listoftables") {
- p.skip_spaces();
context.check_layout(os);
begin_inset(os, "FloatList table\n");
end_inset(os);
- skip_braces(p); // swallow this
+ skip_spaces_braces(p);
}
else if (t.cs() == "listof") {
p.skip_spaces(true);
- string const name = p.get_token().asString();
+ string const name = p.get_token().cs();
if (context.textclass.floats().typeExist(name)) {
context.check_layout(os);
begin_inset(os, "FloatList ");
os << "\n\\" << t.cs() << " default\n";
}
+ else if (t.cs() == "lyxline") {
+ context.check_layout(os);
+ os << "\\lyxline";
+ }
+
+ else if (is_known(t.cs(), known_phrases) ||
+ (t.cs() == "protect" &&
+ p.next_token().cat() == catEscape &&
+ is_known(p.next_token().cs(), known_phrases))) {
+ // LyX sometimes puts a \protect in front, so we have to ignore it
+ // FIXME: This needs to be changed when bug 4752 is fixed.
+ char const * const * where = is_known(
+ t.cs() == "protect" ? p.get_token().cs() : t.cs(),
+ known_phrases);
+ context.check_layout(os);
+ os << known_coded_phrases[where - known_phrases];
+ skip_spaces_braces(p);
+ }
+
+ else if (is_known(t.cs(), known_ref_commands)) {
+ string const opt = p.getOpt();
+ if (opt.empty()) {
+ context.check_layout(os);
+ begin_command_inset(os, "ref", t.cs());
+ os << "reference \""
+ << convert_command_inset_arg(p.verbatim_item())
+ << "\"\n";
+ end_inset(os);
+ } else {
+ // LyX does not support optional arguments of ref commands
+ handle_ert(os, t.asInput() + '[' + opt + "]{" +
+ p.verbatim_item() + "}", context);
+ }
+ }
+
else if (use_natbib &&
is_known(t.cs(), known_natbib_commands) &&
((t.cs() != "citefullauthor" &&
t.cs() != "citeyearpar") ||
p.next_token().asInput() != "*")) {
context.check_layout(os);
- // tex lyx
- // \citet[before][after]{a} \citet[after][before]{a}
- // \citet[before][]{a} \citet[][before]{a}
- // \citet[after]{a} \citet[after]{a}
- // \citet{a} \citet{a}
- string command = '\\' + t.cs();
+ string command = t.cs();
if (p.next_token().asInput() == "*") {
command += '*';
p.get_token();
}
- if (command == "\\citefullauthor")
+ if (command == "citefullauthor")
// alternative name for "\\citeauthor*"
- command = "\\citeauthor*";
+ command = "citeauthor*";
// text before the citation
string before;
string after;
get_cite_arguments(p, true, before, after);
- if (command == "\\cite") {
+ if (command == "cite") {
// \cite without optional argument means
// \citet, \cite with at least one optional
// argument means \citep.
if (before.empty() && after.empty())
- command = "\\citet";
+ command = "citet";
else
- command = "\\citep";
+ command = "citep";
}
if (before.empty() && after == "[]")
// avoid \citet[]{a}
before.erase();
after.erase();
}
- begin_inset(os, "LatexCommand ");
- os << command << after << before
- << '{' << p.verbatim_item() << "}\n";
+ // remove the brackets around after and before
+ if (!after.empty()) {
+ after.erase(0, 1);
+ after.erase(after.length() - 1, 1);
+ after = convert_command_inset_arg(after);
+ }
+ if (!before.empty()) {
+ before.erase(0, 1);
+ before.erase(before.length() - 1, 1);
+ before = convert_command_inset_arg(before);
+ }
+ begin_command_inset(os, "citation", command);
+ os << "after " << '"' << after << '"' << "\n";
+ os << "before " << '"' << before << '"' << "\n";
+ os << "key \""
+ << convert_command_inset_arg(p.verbatim_item())
+ << "\"\n";
end_inset(os);
}
else if (use_jurabib &&
- is_known(t.cs(), known_jurabib_commands)) {
+ is_known(t.cs(), known_jurabib_commands) &&
+ (t.cs() == "cite" || p.next_token().asInput() != "*")) {
context.check_layout(os);
- string const command = '\\' + t.cs();
+ string command = t.cs();
+ if (p.next_token().asInput() == "*") {
+ command += '*';
+ p.get_token();
+ }
char argumentOrder = '\0';
vector<string> const & options = used_packages["jurabib"];
if (find(options.begin(), options.end(),
"package options if you used an\n"
"earlier jurabib version." << endl;
}
- begin_inset(os, "LatexCommand ");
- os << command << after << before
- << '{' << citation << "}\n";
+ if (!after.empty()) {
+ after.erase(0, 1);
+ after.erase(after.length() - 1, 1);
+ }
+ if (!before.empty()) {
+ before.erase(0, 1);
+ before.erase(before.length() - 1, 1);
+ }
+ begin_command_inset(os, "citation", command);
+ os << "after " << '"' << after << '"' << "\n";
+ os << "before " << '"' << before << '"' << "\n";
+ os << "key " << '"' << citation << '"' << "\n";
+ end_inset(os);
+ }
+
+ else if (t.cs() == "cite"
+ || t.cs() == "nocite") {
+ context.check_layout(os);
+ string after = convert_command_inset_arg(p.getArg('[', ']'));
+ string key = convert_command_inset_arg(p.verbatim_item());
+ // store the case that it is "\nocite{*}" to use it later for
+ // the BibTeX inset
+ if (key != "*") {
+ begin_command_inset(os, "citation", t.cs());
+ os << "after " << '"' << after << '"' << "\n";
+ os << "key " << '"' << key << '"' << "\n";
+ end_inset(os);
+ } else if (t.cs() == "nocite")
+ btprint = key;
+ }
+
+ else if (t.cs() == "index") {
+ context.check_layout(os);
+ begin_inset(os, "Index\n");
+ os << "status collapsed\n";
+ parse_text_in_inset(p, os, FLAG_ITEM, false, context, "Index");
+ end_inset(os);
+ }
+
+ else if (t.cs() == "nomenclature") {
+ context.check_layout(os);
+ begin_command_inset(os, "nomenclature", "nomenclature");
+ string prefix = convert_command_inset_arg(p.getArg('[', ']'));
+ if (!prefix.empty())
+ os << "prefix " << '"' << prefix << '"' << "\n";
+ os << "symbol " << '"'
+ << convert_command_inset_arg(p.verbatim_item());
+ os << "\"\ndescription \""
+ << convert_command_inset_arg(p.verbatim_item())
+ << "\"\n";
+ end_inset(os);
+ }
+
+ else if (t.cs() == "label") {
+ context.check_layout(os);
+ begin_command_inset(os, "label", "label");
+ os << "name \""
+ << convert_command_inset_arg(p.verbatim_item())
+ << "\"\n";
+ end_inset(os);
+ }
+
+ else if (t.cs() == "printindex") {
+ context.check_layout(os);
+ begin_command_inset(os, "index_print", "printindex");
+ end_inset(os);
+ skip_spaces_braces(p);
+ }
+
+ else if (t.cs() == "printnomenclature") {
+ context.check_layout(os);
+ begin_command_inset(os, "nomencl_print", "printnomenclature");
end_inset(os);
+ skip_spaces_braces(p);
}
- else if (is_known(t.cs(), known_latex_commands)) {
- // This needs to be after the check for natbib and
- // jurabib commands, because "cite" has different
- // arguments with natbib and jurabib.
+ else if (LYX_FORMAT >= 408 &&
+ (t.cs() == "textsuperscript" || t.cs() == "textsubscript")) {
context.check_layout(os);
- begin_inset(os, "LatexCommand ");
- os << '\\' << t.cs();
- // lyx cannot handle newlines in a latex command
- // FIXME: Move the substitution into parser::getOpt()?
- os << subst(p.getOpt(), "\n", " ");
- os << subst(p.getOpt(), "\n", " ");
- os << '{' << subst(p.verbatim_item(), "\n", " ") << "}\n";
+ begin_inset(os, "script ");
+ os << t.cs().substr(4) << '\n';
+ parse_text_in_inset(p, os, FLAG_ITEM, false, context);
end_inset(os);
}
else if (t.cs() == "selectlanguage") {
context.check_layout(os);
- // save the language for the case that a \foreignlanguage is used
- selectlang = subst(p.verbatim_item(), "\n", " ");
- os << "\\lang " << selectlang << "\n";
-
- }
+ // save the language for the case that a
+ // \foreignlanguage is used
- else if (t.cs() == "foreignlanguage") {
- context.check_layout(os);
- os << "\n\\lang " << subst(p.verbatim_item(), "\n", " ") << "\n";
- os << subst(p.verbatim_item(), "\n", " ");
- // set back to last selectlanguage
- os << "\n\\lang " << selectlang << "\n";
+ context.font.language = babel2lyx(p.verbatim_item());
+ os << "\n\\lang " << context.font.language << "\n";
}
- else if (t.cs() == "inputencoding")
- // write nothing because this is done by LyX using the "\lang"
- // information given by selectlanguage and foreignlanguage
- subst(p.verbatim_item(), "\n", " ");
-
- else if (t.cs() == "LyX" || t.cs() == "TeX"
- || t.cs() == "LaTeX") {
- context.check_layout(os);
- os << t.cs();
- skip_braces(p); // eat {}
+ else if (t.cs() == "foreignlanguage") {
+ string const lang = babel2lyx(p.verbatim_item());
+ parse_text_attributes(p, os, FLAG_ITEM, outer,
+ context, "\\lang",
+ context.font.language, lang);
}
- else if (t.cs() == "LaTeXe") {
- context.check_layout(os);
- os << "LaTeX2e";
- skip_braces(p); // eat {}
+ else if (t.cs() == "inputencoding") {
+ // nothing to write here
+ string const enc = subst(p.verbatim_item(), "\n", " ");
+ p.setEncoding(enc);
}
else if (t.cs() == "ldots") {
context.check_layout(os);
- skip_braces(p);
os << "\\SpecialChar \\ldots{}\n";
+ skip_spaces_braces(p);
}
else if (t.cs() == "lyxarrow") {
context.check_layout(os);
os << "\\SpecialChar \\menuseparator\n";
- skip_braces(p);
+ skip_spaces_braces(p);
}
else if (t.cs() == "textcompwordmark") {
context.check_layout(os);
os << "\\SpecialChar \\textcompwordmark{}\n";
+ skip_spaces_braces(p);
+ }
+
+ else if (t.cs() == "slash") {
+ context.check_layout(os);
+ os << "\\SpecialChar \\slash{}\n";
+ skip_spaces_braces(p);
+ }
+
+ else if (t.cs() == "nobreakdash" && p.next_token().asInput() == "-") {
+ context.check_layout(os);
+ os << "\\SpecialChar \\nobreakdash-\n";
+ p.get_token();
+ }
+
+ else if (t.cs() == "textquotedbl") {
+ context.check_layout(os);
+ os << "\"";
skip_braces(p);
}
else if (t.cs() == "textasciitilde") {
context.check_layout(os);
os << '~';
- skip_braces(p);
+ skip_spaces_braces(p);
}
else if (t.cs() == "textasciicircum") {
context.check_layout(os);
os << '^';
- skip_braces(p);
+ skip_spaces_braces(p);
}
else if (t.cs() == "textbackslash") {
context.check_layout(os);
os << "\n\\backslash\n";
- skip_braces(p);
+ skip_spaces_braces(p);
}
else if (t.cs() == "_" || t.cs() == "&" || t.cs() == "#"
handle_ert(os, oss.str(), context);
}
- else if (t.cs() == "\"") {
- context.check_layout(os);
- string const name = p.verbatim_item();
- if (name == "a") os << '\xe4';
- else if (name == "o") os << '\xf6';
- else if (name == "u") os << '\xfc';
- else if (name == "A") os << '\xc4';
- else if (name == "O") os << '\xd6';
- else if (name == "U") os << '\xdc';
- else handle_ert(os, "\"{" + name + "}", context);
- }
-
// Problem: \= creates a tabstop inside the tabbing environment
// and else an accent. In the latter case we really would want
// \={o} instead of \= o.
else if (t.cs() == "=" && (flags & FLAG_TABBING))
handle_ert(os, t.asInput(), context);
- else if (t.cs() == "H" || t.cs() == "c" || t.cs() == "^"
- || t.cs() == "'" || t.cs() == "`"
- || t.cs() == "~" || t.cs() == "." || t.cs() == "=") {
- // we need the trim as the LyX parser chokes on such spaces
- // The argument of InsetLatexAccent is parsed as a
- // subset of LaTeX, so don't parse anything here,
- // but use the raw argument.
- // Otherwise we would convert \~{\i} wrongly.
- // This will of course not translate \~{\ss} to \~{ß},
- // but that does at least compile and does only look
- // strange on screen.
- context.check_layout(os);
- os << "\\i \\" << t.cs() << "{"
- << trim(p.verbatim_item(), " ")
- << "}\n";
- }
-
- else if (t.cs() == "ss") {
- context.check_layout(os);
- os << "\xdf";
- skip_braces(p); // eat {}
- }
-
- else if (t.cs() == "i" || t.cs() == "j" || t.cs() == "l" ||
- t.cs() == "L") {
- context.check_layout(os);
- os << "\\i \\" << t.cs() << "{}\n";
- skip_braces(p); // eat {}
+ // accents (see Table 6 in Comprehensive LaTeX Symbol List)
+ else if (t.cs().size() == 1
+ && contains("\"'.=^`bcdHkrtuv~", t.cs())) {
+ context.check_layout(os);
+ // try to see whether the string is in unicodesymbols
+ docstring rem;
+ string command = t.asInput() + "{"
+ + trim(p.verbatim_item())
+ + "}";
+ docstring s = encodings.fromLaTeXCommand(from_utf8(command), rem);
+ if (!s.empty()) {
+ if (!rem.empty())
+ cerr << "When parsing " << command
+ << ", result is " << to_utf8(s)
+ << "+" << to_utf8(rem) << endl;
+ os << to_utf8(s);
+ } else
+ // we did not find a non-ert version
+ handle_ert(os, command, context);
}
else if (t.cs() == "\\") {
context.check_layout(os);
- string const next = p.next_token().asInput();
- if (next == "[")
+ if (p.hasOpt())
handle_ert(os, "\\\\" + p.getOpt(), context);
- else if (next == "*") {
+ else if (p.next_token().asInput() == "*") {
p.get_token();
+ // getOpt() eats the following space if there
+ // is no optional argument, but that is OK
+ // here since it has no effect in the output.
handle_ert(os, "\\\\*" + p.getOpt(), context);
}
else {
- os << "\n\\newline\n";
+ begin_inset(os, "Newline newline");
+ end_inset(os);
}
}
else if (t.cs() == "newline" ||
- t.cs() == "linebreak") {
- context.check_layout(os);
- os << "\n\\" << t.cs() << "\n";
- skip_braces(p); // eat {}
- }
-
- else if (t.cs() == "href") {
- context.check_layout(os);
- begin_inset(os, "CommandInset ");
- os << t.cs() << "\n";
- os << "LatexCommand " << t.cs() << "\n";
- bool erase = false;
- size_t pos;
- // the first argument is "type:target", "type:" is optional
- // the second argument the name
- string href_target = subst(p.verbatim_item(), "\n", " ");
- string href_name = subst(p.verbatim_item(), "\n", " ");
- string href_type;
- // serach for the ":" to divide type from target
- if ((pos = href_target.find(":", 0)) != string::npos){
- href_type = href_target;
- href_type.erase(pos + 1, href_type.length());
- href_target.erase(0, pos + 1);
- erase = true;
- }
- os << "name " << '"' << href_name << '"' << "\n";
- os << "target " << '"' << href_target << '"' << "\n";
- if(erase)
- os << "type " << '"' << href_type << '"' << "\n";
+ (t.cs() == "linebreak" && !p.hasOpt())) {
+ context.check_layout(os);
+ begin_inset(os, "Newline ");
+ os << t.cs();
end_inset(os);
+ skip_spaces_braces(p);
}
else if (t.cs() == "input" || t.cs() == "include"
|| t.cs() == "verbatiminput") {
- string name = '\\' + t.cs();
+ string name = t.cs();
if (t.cs() == "verbatiminput"
&& p.next_token().asInput() == "*")
name += p.get_token().asInput();
context.check_layout(os);
- begin_inset(os, "Include ");
string filename(normalize_filename(p.getArg('{', '}')));
string const path = getMasterFilePath();
// We want to preserve relative / absolute filenames,
// therefore path is only used for testing
- // FIXME UNICODE encoding of filename and path may be
- // wrong (makeAbsPath expects utf8)
if ((t.cs() == "include" || t.cs() == "input") &&
!makeAbsPath(filename, path).exists()) {
// The file extension is probably missing.
if (!tex_name.empty())
filename = tex_name;
}
- // FIXME UNICODE encoding of filename and path may be
- // wrong (makeAbsPath expects utf8)
+ bool external = false;
+ string outname;
if (makeAbsPath(filename, path).exists()) {
string const abstexname =
- makeAbsPath(filename, path).absFilename();
+ makeAbsPath(filename, path).absFileName();
string const abslyxname =
changeExtension(abstexname, ".lyx");
+ string const absfigname =
+ changeExtension(abstexname, ".fig");
fix_relative_filename(filename);
string const lyxname =
changeExtension(filename, ".lyx");
- if (t.cs() != "verbatiminput" &&
- tex2lyx(abstexname, FileName(abslyxname))) {
- os << name << '{' << lyxname << "}\n";
+ bool xfig = false;
+ external = FileName(absfigname).exists();
+ if (t.cs() == "input") {
+ string const ext = getExtension(abstexname);
+
+ // Combined PS/LaTeX:
+ // x.eps, x.pstex_t (old xfig)
+ // x.pstex, x.pstex_t (new xfig, e.g. 3.2.5)
+ FileName const absepsname(
+ changeExtension(abstexname, ".eps"));
+ FileName const abspstexname(
+ changeExtension(abstexname, ".pstex"));
+ bool const xfigeps =
+ (absepsname.exists() ||
+ abspstexname.exists()) &&
+ ext == "pstex_t";
+
+ // Combined PDF/LaTeX:
+ // x.pdf, x.pdftex_t (old xfig)
+ // x.pdf, x.pdf_t (new xfig, e.g. 3.2.5)
+ FileName const abspdfname(
+ changeExtension(abstexname, ".pdf"));
+ bool const xfigpdf =
+ abspdfname.exists() &&
+ (ext == "pdftex_t" || ext == "pdf_t");
+ if (xfigpdf)
+ pdflatex = true;
+
+ // Combined PS/PDF/LaTeX:
+ // x_pspdftex.eps, x_pspdftex.pdf, x.pspdftex
+ string const absbase2(
+ removeExtension(abstexname) + "_pspdftex");
+ FileName const abseps2name(
+ addExtension(absbase2, ".eps"));
+ FileName const abspdf2name(
+ addExtension(absbase2, ".pdf"));
+ bool const xfigboth =
+ abspdf2name.exists() &&
+ abseps2name.exists() && ext == "pspdftex";
+
+ xfig = xfigpdf || xfigeps || xfigboth;
+ external = external && xfig;
+ }
+ if (external) {
+ outname = changeExtension(filename, ".fig");
+ } else if (xfig) {
+ // Don't try to convert, the result
+ // would be full of ERT.
+ outname = filename;
+ } else if (t.cs() != "verbatiminput" &&
+ tex2lyx(abstexname, FileName(abslyxname),
+ p.getEncoding())) {
+ outname = lyxname;
} else {
- os << name << '{' << filename << "}\n";
+ outname = filename;
}
} else {
cerr << "Warning: Could not find included file '"
<< filename << "'." << endl;
- os << name << '{' << filename << "}\n";
+ outname = filename;
+ }
+ if (external) {
+ begin_inset(os, "External\n");
+ os << "\ttemplate XFig\n"
+ << "\tfilename " << outname << '\n';
+ } else {
+ begin_command_inset(os, "include", name);
+ os << "preview false\n"
+ "filename \"" << outname << "\"\n";
}
- os << "preview false\n";
end_inset(os);
}
else if (t.cs() == "bibliographystyle") {
// store new bibliographystyle
bibliographystyle = p.verbatim_item();
- // output new bibliographystyle.
- // This is only necessary if used in some other macro than \bibliography.
- handle_ert(os, "\\bibliographystyle{" + bibliographystyle + "}", context);
+ // If any other command than \bibliography and
+ // \nocite{*} follows, we need to output the style
+ // (because it might be used by that command).
+ // Otherwise, it will automatically be output by LyX.
+ p.pushPosition();
+ bool output = true;
+ for (Token t2 = p.get_token(); p.good(); t2 = p.get_token()) {
+ if (t2.cat() == catBegin)
+ break;
+ if (t2.cat() != catEscape)
+ continue;
+ if (t2.cs() == "nocite") {
+ if (p.getArg('{', '}') == "*")
+ continue;
+ } else if (t2.cs() == "bibliography")
+ output = false;
+ break;
+ }
+ p.popPosition();
+ if (output) {
+ handle_ert(os,
+ "\\bibliographystyle{" + bibliographystyle + '}',
+ context);
+ }
}
else if (t.cs() == "bibliography") {
context.check_layout(os);
- begin_inset(os, "LatexCommand ");
- os << "\\bibtex";
- // Do we have a bibliographystyle set?
- if (!bibliographystyle.empty()) {
- os << '[' << bibliographystyle << ']';
+ begin_command_inset(os, "bibtex", "bibtex");
+ if (!btprint.empty()) {
+ os << "btprint " << '"' << "btPrintAll" << '"' << "\n";
+ // clear the string because the next BibTeX inset can be without the
+ // \nocite{*} option
+ btprint.clear();
}
- os << '{' << p.verbatim_item() << "}\n";
+ os << "bibfiles " << '"' << p.verbatim_item() << '"' << "\n";
+ // Do we have a bibliographystyle set?
+ if (!bibliographystyle.empty())
+ os << "options " << '"' << bibliographystyle << '"' << "\n";
end_inset(os);
}
else if (t.cs() == "parbox")
- parse_box(p, os, FLAG_ITEM, outer, context, true);
-
+ parse_box(p, os, 0, FLAG_ITEM, outer, context, "", "", t.cs());
+
+ else if (t.cs() == "ovalbox" || t.cs() == "Ovalbox" ||
+ t.cs() == "shadowbox" || t.cs() == "doublebox")
+ parse_outer_box(p, os, FLAG_ITEM, outer, context, t.cs(), "");
+
+ else if (t.cs() == "framebox") {
+ string special = p.getFullOpt();
+ special += p.getOpt();
+ parse_outer_box(p, os, FLAG_ITEM, outer, context, t.cs(), special);
+ }
+
//\makebox() is part of the picture environment and different from \makebox{}
//\makebox{} will be parsed by parse_box when bug 2956 is fixed
else if (t.cs() == "makebox") {
begin_inset(os, "VSpace ");
os << t.cs();
end_inset(os);
- skip_braces(p);
+ skip_spaces_braces(p);
}
else if (is_known(t.cs(), known_spaces)) {
char const * const * where = is_known(t.cs(), known_spaces);
context.check_layout(os);
- begin_inset(os, "InsetSpace ");
+ begin_inset(os, "space ");
os << '\\' << known_coded_spaces[where - known_spaces]
<< '\n';
+ end_inset(os);
// LaTeX swallows whitespace after all spaces except
// "\\,". We have to do that here, too, because LyX
// adds "{}" which would make the spaces significant.
}
else if (t.cs() == "newpage" ||
- t.cs() == "pagebreak" ||
- t.cs() == "clearpage" ||
- t.cs() == "cleardoublepage") {
+ (t.cs() == "pagebreak" && !p.hasOpt()) ||
+ t.cs() == "clearpage" ||
+ t.cs() == "cleardoublepage") {
context.check_layout(os);
- os << "\n\\" << t.cs() << "\n";
- skip_braces(p); // eat {}
+ begin_inset(os, "Newpage ");
+ os << t.cs();
+ end_inset(os);
+ skip_spaces_braces(p);
}
- else if (t.cs() == "newcommand" ||
+ else if (t.cs() == "DeclareRobustCommand" ||
+ t.cs() == "DeclareRobustCommandx" ||
+ t.cs() == "newcommand" ||
+ t.cs() == "newcommandx" ||
t.cs() == "providecommand" ||
- t.cs() == "renewcommand" ||
- t.cs() == "newlyxcommand") {
- // these could be handled by parse_command(), but
- // we need to call add_known_command() here.
+ t.cs() == "providecommandx" ||
+ t.cs() == "renewcommand" ||
+ t.cs() == "renewcommandx") {
+ // DeclareRobustCommand, DeclareRobustCommandx,
+ // providecommand and providecommandx could be handled
+ // by parse_command(), but we need to call
+ // add_known_command() here.
string name = t.asInput();
if (p.next_token().asInput() == "*") {
// Starred form. Eat '*'
name += '*';
}
string const command = p.verbatim_item();
- string const opt1 = p.getOpt();
- string optionals;
- unsigned optionalsNum = 0;
- while (true) {
- string const opt = p.getFullOpt();
- if (opt.empty())
- break;
- optionalsNum++;
- optionals += opt;
+ string const opt1 = p.getFullOpt();
+ string const opt2 = p.getFullOpt();
+ add_known_command(command, opt1, !opt2.empty());
+ string const ert = name + '{' + command + '}' +
+ opt1 + opt2 +
+ '{' + p.verbatim_item() + '}';
+
+ if (t.cs() == "DeclareRobustCommand" ||
+ t.cs() == "DeclareRobustCommandx" ||
+ t.cs() == "providecommand" ||
+ t.cs() == "providecommandx" ||
+ name[name.length()-1] == '*')
+ handle_ert(os, ert, context);
+ else {
+ context.check_layout(os);
+ begin_inset(os, "FormulaMacro");
+ os << "\n" << ert;
+ end_inset(os);
}
- add_known_command(command, opt1, optionalsNum);
- string const ert = name + '{' + command + '}' + opt1
- + optionals + '{' + p.verbatim_item() + '}';
-
- context.check_layout(os);
- begin_inset(os, "FormulaMacro");
- os << "\n" << ert;
- end_inset(os);
}
-
- else if (t.cs() == "newcommandx" ||
- t.cs() == "renewcommandx") {
- // \newcommandx{\foo}[2][usedefault, addprefix=\global,1=default]{#1,#2}
- // get command name
+ else if (t.cs() == "let" && p.next_token().asInput() != "*") {
+ // let could be handled by parse_command(),
+ // but we need to call add_known_command() here.
+ string ert = t.asInput();
+ string name;
+ p.skip_spaces();
+ if (p.next_token().cat() == catBegin) {
+ name = p.verbatim_item();
+ ert += '{' + name + '}';
+ } else {
+ name = p.verbatim_item();
+ ert += name;
+ }
string command;
- if (p.next_token().cat() == catBegin)
+ p.skip_spaces();
+ if (p.next_token().cat() == catBegin) {
command = p.verbatim_item();
- else
- command = "\\" + p.get_token().cs();
-
- // get arity, we do not check that it fits to the given
- // optional parameters here.
- string const opt1 = p.getOpt();
-
- // get options and default values for optional parameters
- std::vector<string> optionalValues;
- int optionalsNum = 0;
- if (p.next_token().character() == '[') {
- // skip '['
- p.get_token();
-
- // handle 'opt=value' options, separated by ','.
- eat_whitespace(p, os, context, false);
- while (p.next_token().character() != ']' && p.good()) {
- char_type nextc = p.next_token().character();
- if (nextc >= '1' && nextc <= '9') {
- // optional value -> get parameter number
- int n = p.getChar() - '0';
-
- // skip '='
- if (p.next_token().character() != '=') {
- cerr << "'=' expected after numeral option of \\newcommandx" << std::endl;
- // try to find ] or ,
- while (p.next_token().character() != ','
- && p.next_token().character() != ']')
- p.get_token();
- continue;
- } else
- p.get_token();
-
- // get value
- optionalValues.resize(max(size_t(n), optionalValues.size()));
- optionalValues[n - 1].clear();
- while (p.next_token().character() != ']'
- && p.next_token().character() != ',')
- optionalValues[n - 1] += p.verbatim_item();
- optionalsNum = max(n, optionalsNum);
- } else if (p.next_token().cat() == catLetter) {
- // we in fact ignore every non-optional
- // parameters
-
- // get option name
- docstring opt;
- while (p.next_token().cat() == catLetter)
- opt += p.getChar();
-
- // value?
- eat_whitespace(p, os, context, false);
- if (p.next_token().character() == '=') {
- p.get_token();
- while (p.next_token().character() != ']'
- && p.next_token().character() != ',')
- p.verbatim_item();
- }
- } else
- return;
-
- // skip komma
- eat_whitespace(p, os, context, false);
- if (p.next_token().character() == ',') {
- p.getChar();
- eat_whitespace(p, os, context, false);
- } else if (p.next_token().character() != ']')
- continue;
- }
-
- // skip ']'
- p.get_token();
+ ert += '{' + command + '}';
+ } else {
+ command = p.verbatim_item();
+ ert += command;
}
-
- // concat the default values to the optionals string
- string optionals;
- for (unsigned i = 0; i < optionalValues.size(); ++i)
- optionals += "[" + optionalValues[i] + "]";
-
- // register and output command
- add_known_command(command, opt1, optionalsNum);
- string const ert = "\\newcommand{" + command + '}' + opt1
- + optionals + '{' + p.verbatim_item() + '}';
-
- context.check_layout(os);
- begin_inset(os, "FormulaMacro");
- os << "\n" << ert;
- end_inset(os);
+ // If command is known, make name known too, to parse
+ // its arguments correctly. For this reason we also
+ // have commands in syntax.default that are hardcoded.
+ CommandMap::iterator it = known_commands.find(command);
+ if (it != known_commands.end())
+ known_commands[t.asInput()] = it->second;
+ handle_ert(os, ert, context);
}
- else if (t.cs() == "vspace") {
+ else if (t.cs() == "hspace" || t.cs() == "vspace") {
bool starred = false;
if (p.next_token().asInput() == "*") {
p.get_token();
starred = true;
}
+ string name = t.asInput();
string const length = p.verbatim_item();
string unit;
string valstring;
bool valid = splitLatexLength(length, valstring, unit);
+ bool known_hspace = false;
bool known_vspace = false;
bool known_unit = false;
double value;
istringstream iss(valstring);
iss >> value;
if (value == 1.0) {
- if (unit == "\\smallskipamount") {
- unit = "smallskip";
- known_vspace = true;
- } else if (unit == "\\medskipamount") {
- unit = "medskip";
- known_vspace = true;
- } else if (unit == "\\bigskipamount") {
- unit = "bigskip";
- known_vspace = true;
- } else if (unit == "\\fill") {
- unit = "vfill";
- known_vspace = true;
+ if (t.cs()[0] == 'h') {
+ if (unit == "\\fill") {
+ if (!starred) {
+ unit = "";
+ name = "\\hfill";
+ }
+ known_hspace = true;
+ }
+ } else {
+ if (unit == "\\smallskipamount") {
+ unit = "smallskip";
+ known_vspace = true;
+ } else if (unit == "\\medskipamount") {
+ unit = "medskip";
+ known_vspace = true;
+ } else if (unit == "\\bigskipamount") {
+ unit = "bigskip";
+ known_vspace = true;
+ } else if (unit == "\\fill") {
+ unit = "vfill";
+ known_vspace = true;
+ }
}
}
- if (!known_vspace) {
+ if (!known_hspace && !known_vspace) {
switch (unitFromString(unit)) {
case Length::SP:
case Length::PT:
}
}
- if (known_unit || known_vspace) {
- // Literal length or known variable
+ if (t.cs()[0] == 'h' && (known_unit || known_hspace)) {
+ // Literal horizontal length or known variable
+ context.check_layout(os);
+ begin_inset(os, "space ");
+ os << name;
+ if (starred)
+ os << '*';
+ os << '{';
+ if (known_hspace)
+ os << unit;
+ os << "}";
+ if (known_unit && !known_hspace)
+ os << "\n\\length "
+ << translate_len(length);
+ end_inset(os);
+ } else if (known_unit || known_vspace) {
+ // Literal vertical length or known variable
context.check_layout(os);
begin_inset(os, "VSpace ");
if (known_unit)
os << '*';
end_inset(os);
} else {
- // LyX can't handle other length variables in Inset VSpace
- string name = t.asInput();
+ // LyX can't handle other length variables in Inset VSpace/space
if (starred)
name += '*';
if (valid) {
}
}
+	// The single '=' (assignment, not '==') is intentional here.
+ else if ((newinsetlayout = findInsetLayout(context.textclass, t.cs(), true))) {
+ p.skip_spaces();
+ context.check_layout(os);
+ begin_inset(os, "Flex ");
+ os << to_utf8(newinsetlayout->name()) << '\n'
+ << "status collapsed\n";
+ parse_text_in_inset(p, os, FLAG_ITEM, false, context, newinsetlayout);
+ end_inset(os);
+ }
+
else {
+ // try to see whether the string is in unicodesymbols
+ // Only use text mode commands, since we are in text mode here,
+ // and math commands may be invalid (bug 6797)
+ docstring rem;
+ docstring s = encodings.fromLaTeXCommand(from_utf8(t.asInput()),
+ rem, Encodings::TEXT_CMD);
+ if (!s.empty()) {
+ if (!rem.empty())
+ cerr << "When parsing " << t.cs()
+ << ", result is " << to_utf8(s)
+ << "+" << to_utf8(rem) << endl;
+ context.check_layout(os);
+ os << to_utf8(s);
+ skip_spaces_braces(p);
+ }
//cerr << "#: " << t << " mode: " << mode << endl;
// heuristic: read up to next non-nested space
/*
cerr << "found ERT: " << s << endl;
handle_ert(os, s + ' ', context);
*/
- string name = t.asInput();
- if (p.next_token().asInput() == "*") {
- // Starred commands like \vspace*{}
- p.get_token(); // Eat '*'
- name += '*';
+ else {
+ string name = t.asInput();
+ if (p.next_token().asInput() == "*") {
+ // Starred commands like \vspace*{}
+ p.get_token(); // Eat '*'
+ name += '*';
+ }
+ if (!parse_command(name, p, os, outer, context))
+ handle_ert(os, name, context);
}
- if (! parse_command(name, p, os, outer, context))
- handle_ert(os, name, context);
}
if (flags & FLAG_LEAVE) {