string fboxrule = "";
string fboxsep = "";
string shadow_size = "";
-bool wasBoxAlign = false;
char const * const known_ref_commands[] = { "ref", "pageref", "vref",
"vpageref", "prettyref", "nameref", "eqref", 0 };
char const * const known_refstyle_commands[] = { "algref", "chapref", "corref",
"eqref", "enuref", "figref", "fnref", "lemref", "parref", "partref", "propref",
- "secref", "subref", "tabref", "thmref", 0 };
+ "secref", "subsecref", "tabref", "thmref", 0 };
char const * const known_refstyle_prefixes[] = { "alg", "chap", "cor",
"eq", "enu", "fig", "fn", "lem", "par", "part", "prop",
- "sec", "sub", "tab", "thm", 0 };
+ "sec", "subsec", "tab", "thm", 0 };
/**
// "footciteauthor", "footciteyear", "footciteyearpar",
"citefield", "citetitle", 0 };
+/*!
+ * biblatex commands.
+ * Known starred forms: \cite*, \citeauthor*, \Citeauthor*, \parencite*, \citetitle*.
+ */
+char const * const known_biblatex_commands[] = { "cite", "Cite", "textcite", "Textcite",
+"parencite", "Parencite", "citeauthor", "Citeauthor", "citeyear", "smartcite", "Smartcite",
+ "footcite", "Footcite", "autocite", "Autocite", "citetitle", "fullcite", "footfullcite",
+"supercite", "cites", "Cites", "textcites", "Textcites", "parencites", "Parencites",
+"smartcites", "Smartcites", "autocites", "Autocites", 0 };
+
+// Whether we need to insert a bibtex inset in a comment
+// (set when a biblatex cite engine is in use; see parse_text)
+bool need_commentbib = false;
+
/// LaTeX names for quotes
char const * const known_quotes[] = { "dq", "guillemotleft", "flqq", "og",
"guillemotright", "frqq", "fg", "glq", "glqq", "textquoteleft", "grq", "grqq",
"textovercross", "textsubarch", "textsuperimposetilde", "textraising",
"textlowering", "textadvancing", "textretracting", "textdoublegrave",
"texthighrise", "textlowrise", "textrisefall", "textsyllabic",
-"textsubring", 0};
+"textsubring", "textsubbar", 0};
/// TIPA tones that need special handling
char const * const known_tones[] = {"15", "51", "45", "12", "454", 0};
// string to store the float type to be able to determine the type of subfloats
string float_type = "";
+// string to store the float status of minted listings
+string minted_float = "";
+
+// whether a caption has been parsed for a floating minted listing
+bool minted_float_has_caption = false;
+
+// The caption for non-floating minted listings
+string minted_nonfloat_caption = "";
+
/// splits "x=z, y=b" into a map and an ordered keyword vector
void split_map(string const & s, map<string, string> & res, vector<string> & keys)
} else if (unit == "\\textheight") {
valstring = percentval;
unit = "theight%" + endlen;
+ } else if (unit == "\\baselineskip") {
+ valstring = percentval;
+ unit = "baselineskip%" + endlen;
}
return true;
}
-}
+} // namespace
string translate_len(string const & length)
}
-/*!
- * Find a file with basename \p name in path \p path and an extension
- * in \p extensions.
- */
-string find_file(string const & name, string const & path,
- char const * const * extensions)
-{
- for (char const * const * what = extensions; *what; ++what) {
- string const trial = addExtension(name, *what);
- if (makeAbsPath(trial, path).exists())
- return trial;
- }
- return string();
-}
-
-
void begin_inset(ostream & os, string const & name)
{
os << "\n\\begin_inset " << name;
bool termination;
docstring rem;
set<string> req;
- docstring parsed = encodings.fromLaTeXCommand(s,
- Encodings::TEXT_CMD, termination, rem, &req);
+ docstring parsed = normalize_c(encodings.fromLaTeXCommand(s,
+ Encodings::TEXT_CMD, termination, rem, &req));
set<string>::const_iterator it = req.begin();
set<string>::const_iterator en = req.end();
for (; it != en; ++it)
}
+// Output the comment text \p s as an ERT inset, prefixed with '%'.
+// If the next token is a newline, the '\n' is omitted from the ERT
+// (the newline stays in the input stream for the caller to handle);
+// otherwise a '\n' is appended so the comment is properly terminated.
+void output_comment(Parser & p, ostream & os, string const & s,
+ Context & context)
+{
+ if (p.next_token().cat() == catNewline)
+ output_ert_inset(os, '%' + s, context);
+ else
+ output_ert_inset(os, '%' + s + '\n', context);
+}
+
+
Layout const * findLayout(TextClass const & textclass, string const & name, bool command)
{
Layout const * layout = findLayoutWithoutModule(textclass, name, command);
p.popPosition();
}
- // try to determine the box content alignment
- // first handle the simple case of "{\centering..."
- if (p.next_token().asInput() == "\\raggedright") {
- wasBoxAlign = true;
- hor_pos = "l";
- } else if (p.next_token().asInput() == "\\centering") {
- wasBoxAlign = true;
- hor_pos = "c";
- } else if (p.next_token().asInput() == "\\raggedleft") {
- wasBoxAlign = true;
- hor_pos = "r";
- } else {
- // now handle the cases "{%catNewline\centering..."
- // and "{catNewline\centering..."
- p.pushPosition();
- p.get_token().asInput();
- if (p.next_token().cat() == catComment || p.next_token().cat() == catNewline)
- p.get_token().asInput();
- if (p.next_token().asInput() == "\\raggedright") {
- wasBoxAlign = true;
- hor_pos = "l";
- } else if (p.next_token().asInput() == "\\centering") {
- wasBoxAlign = true;
- hor_pos = "c";
- } else if (p.next_token().asInput() == "\\raggedleft") {
- wasBoxAlign = true;
- hor_pos = "r";
- }
- p.popPosition();
- }
-
if (use_ert) {
ostringstream ss;
if (!outer_type.empty()) {
// LyX puts a % after the end of the minipage
if (p.next_token().cat() == catNewline && p.next_token().cs().size() > 1) {
// new paragraph
- //output_ert_inset(os, "%dummy", parent_context);
+ //output_comment(p, os, "dummy", parent_context);
p.get_token();
p.skip_spaces();
parent_context.new_paragraph(os);
}
else if (p.next_token().cat() == catSpace || p.next_token().cat() == catNewline) {
- //output_ert_inset(os, "%dummy", parent_context);
+ //output_comment(p, os, "dummy", parent_context);
p.get_token();
p.skip_spaces();
// We add a protected space if something real follows
}
-void parse_listings(Parser & p, ostream & os, Context & parent_context, bool in_line)
+void parse_listings(Parser & p, ostream & os, Context & parent_context,
+ bool in_line, bool use_minted)
{
parent_context.check_layout(os);
begin_inset(os, "listings\n");
- if (p.hasOpt()) {
- string arg = p.verbatimOption();
+ string arg = p.hasOpt() ? subst(p.verbatimOption(), "\n", "") : string();
+ size_t i;
+ while ((i = arg.find(", ")) != string::npos
+ || (i = arg.find(",\t")) != string::npos)
+ arg.erase(i + 1, 1);
+
+ if (use_minted) {
+ string const language = p.getArg('{', '}');
+ p.skip_spaces(true);
+ arg += string(arg.empty() ? "" : ",") + "language=" + language;
+ if (!minted_float.empty()) {
+ arg += string(arg.empty() ? "" : ",") + minted_float;
+ minted_nonfloat_caption.clear();
+ }
+ }
+ if (!arg.empty()) {
os << "lstparams " << '"' << arg << '"' << '\n';
if (arg.find("\\color") != string::npos)
preamble.registerAutomaticallyLoadedPackage("color");
os << "status collapsed\n";
Context context(true, parent_context.textclass);
context.layout = &parent_context.textclass.plainLayout();
+ if (use_minted && prefixIs(minted_nonfloat_caption, "[t]")) {
+ minted_nonfloat_caption.erase(0,3);
+ os << "\n\\begin_layout Plain Layout\n";
+ begin_inset(os, "Caption Standard\n");
+ Context newcontext(true, context.textclass,
+ context.layout, 0, context.font);
+ newcontext.check_layout(os);
+ os << minted_nonfloat_caption << "\n";
+ newcontext.check_end_layout(os);
+ end_inset(os);
+ os << "\n\\end_layout\n";
+ minted_nonfloat_caption.clear();
+ }
string s;
if (in_line) {
// set catcodes to verbatim early, just in case.
//FIXME: handler error condition
s = p.verbatimStuff(delim).second;
// context.new_paragraph(os);
- } else
+ } else if (use_minted) {
+ s = p.verbatimEnvironment("minted");
+ } else {
s = p.verbatimEnvironment("lstlisting");
+ }
output_ert(os, s, context);
- end_inset(os);
+ if (use_minted && prefixIs(minted_nonfloat_caption, "[b]")) {
+ minted_nonfloat_caption.erase(0,3);
+ os << "\n\\begin_layout Plain Layout\n";
+ begin_inset(os, "Caption Standard\n");
+ Context newcontext(true, context.textclass,
+ context.layout, 0, context.font);
+ newcontext.check_layout(os);
+ os << minted_nonfloat_caption << "\n";
+ newcontext.check_end_layout(os);
+ end_inset(os);
+ os << "\n\\end_layout\n";
+ minted_nonfloat_caption.clear();
+ }
+ // Don't close the inset here for floating minted listings.
+ // It will be closed at the end of the listing environment.
+ if (!use_minted || minted_float.empty())
+ end_inset(os);
+ else {
+ eat_whitespace(p, os, parent_context, true);
+ Token t = p.get_token();
+ if (t.asInput() != "\\end") {
+ // If anything follows, collect it into a caption.
+ minted_float_has_caption = true;
+ os << "\n\\begin_layout Plain Layout\n"; // outer layout
+ begin_inset(os, "Caption Standard\n");
+ os << "\n\\begin_layout Plain Layout\n"; // inner layout
+ }
+ p.putback();
+ }
}
}
else if (unstarred_name == "sidewaysfigure"
- || unstarred_name == "sidewaystable") {
+ || unstarred_name == "sidewaystable"
+ || unstarred_name == "sidewaysalgorithm") {
+ string const opt = p.hasOpt() ? p.getArg('[', ']') : string();
eat_whitespace(p, os, parent_context, false);
parent_context.check_layout(os);
if (unstarred_name == "sidewaysfigure")
begin_inset(os, "Float figure\n");
- else
+ else if (unstarred_name == "sidewaystable")
begin_inset(os, "Float table\n");
+ else if (unstarred_name == "sidewaysalgorithm")
+ begin_inset(os, "Float algorithm\n");
+ if (!opt.empty())
+ os << "placement " << opt << '\n';
+ if (contains(opt, "H"))
+ preamble.registerAutomaticallyLoadedPackage("float");
os << "wide " << convert<string>(is_starred)
<< "\nsideways true"
<< "\nstatus open\n\n";
preamble.registerAutomaticallyLoadedPackage("verbatim");
}
- else if (name == "verbatim") {
+ else if (unstarred_name == "verbatim") {
// FIXME: this should go in the generic code that
// handles environments defined in layout file that
// have "PassThru 1". However, the code over there is
// already too complicated for my taste.
+ string const ascii_name =
+ (name == "verbatim*") ? "Verbatim*" : "Verbatim";
parent_context.new_paragraph(os);
Context context(true, parent_context.textclass,
- &parent_context.textclass[from_ascii("Verbatim")]);
- string s = p.verbatimEnvironment("verbatim");
+ &parent_context.textclass[from_ascii(ascii_name)]);
+ string s = p.verbatimEnvironment(name);
output_ert(os, s, context);
p.skip_spaces();
}
p.skip_spaces(true);
os << "btprint " << '"' << "btPrintAll" << '"' << "\n";
}
- os << "bibfiles " << '"' << bibfile << '"' << "\n";
- os << "options " << '"' << bibstyle << '"' << "\n";
+ os << "bibfiles " << '"' << bibfile << "\"\n"
+ << "options " << '"' << bibstyle << "\"\n";
parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
end_inset(os);
p.skip_spaces();
preamble.registerAutomaticallyLoadedPackage("framed");
}
- else if (name == "lstlisting") {
+ else if (name == "listing") {
+ minted_float = "float";
+ eat_whitespace(p, os, parent_context, false);
+ string const opt = p.hasOpt() ? p.getArg('[', ']') : string();
+ if (!opt.empty())
+ minted_float += "=" + opt;
+ // If something precedes \begin{minted}, we output it at the end
+ // as a caption, in order to keep it inside the listings inset.
+ eat_whitespace(p, os, parent_context, true);
+ p.pushPosition();
+ Token const & t = p.get_token();
+ p.skip_spaces(true);
+ string const envname = p.next_token().cat() == catBegin
+ ? p.getArg('{', '}') : string();
+ bool prologue = t.asInput() != "\\begin" || envname != "minted";
+ p.popPosition();
+ minted_float_has_caption = false;
+ string content = parse_text_snippet(p, FLAG_END, outer,
+ parent_context);
+ size_t i = content.find("\\begin_inset listings");
+ bool minted_env = i != string::npos;
+ string caption;
+ if (prologue) {
+ caption = content.substr(0, i);
+ content.erase(0, i);
+ }
+ parent_context.check_layout(os);
+ if (minted_env && minted_float_has_caption) {
+ eat_whitespace(p, os, parent_context, true);
+ os << content << "\n";
+ if (!caption.empty())
+ os << caption << "\n";
+ os << "\n\\end_layout\n"; // close inner layout
+ end_inset(os); // close caption inset
+ os << "\n\\end_layout\n"; // close outer layout
+ } else if (!caption.empty()) {
+ if (!minted_env) {
+ begin_inset(os, "listings\n");
+ os << "lstparams " << '"' << minted_float << '"' << '\n';
+ os << "inline false\n";
+ os << "status collapsed\n";
+ }
+ os << "\n\\begin_layout Plain Layout\n";
+ begin_inset(os, "Caption Standard\n");
+ Context newcontext(true, parent_context.textclass,
+ 0, 0, parent_context.font);
+ newcontext.check_layout(os);
+ os << caption << "\n";
+ newcontext.check_end_layout(os);
+ end_inset(os);
+ os << "\n\\end_layout\n";
+ } else if (content.empty()) {
+ begin_inset(os, "listings\n");
+ os << "lstparams " << '"' << minted_float << '"' << '\n';
+ os << "inline false\n";
+ os << "status collapsed\n";
+ } else {
+ os << content << "\n";
+ }
+ end_inset(os); // close listings inset
+ parent_context.check_end_layout(os);
+ parent_context.new_paragraph(os);
+ p.skip_spaces();
+ minted_float.clear();
+ minted_float_has_caption = false;
+ }
+
+ else if (name == "lstlisting" || name == "minted") {
+ bool use_minted = name == "minted";
eat_whitespace(p, os, parent_context, false);
- parse_listings(p, os, parent_context, false);
+ if (use_minted && minted_float.empty()) {
+ // look ahead for a bottom caption
+ p.pushPosition();
+ bool found_end_minted = false;
+ while (!found_end_minted && p.good()) {
+ Token const & t = p.get_token();
+ p.skip_spaces();
+ string const envname =
+ p.next_token().cat() == catBegin
+ ? p.getArg('{', '}') : string();
+ found_end_minted = t.asInput() == "\\end"
+ && envname == "minted";
+ }
+ eat_whitespace(p, os, parent_context, true);
+ Token const & t = p.get_token();
+ p.skip_spaces(true);
+ if (t.asInput() == "\\lyxmintcaption") {
+ string const pos = p.getArg('[', ']');
+ if (pos == "b") {
+ string const caption =
+ parse_text_snippet(p, FLAG_ITEM,
+ false, parent_context);
+ minted_nonfloat_caption = "[b]" + caption;
+ }
+ }
+ p.popPosition();
+ }
+ parse_listings(p, os, parent_context, false, use_minted);
p.skip_spaces();
}
// Alignment and spacing settings
// FIXME (bug xxxx): These settings can span multiple paragraphs and
// therefore are totally broken!
- // Note that \centering, raggedright, and raggedleft cannot be handled, as
+ // Note that \centering, \raggedright, and \raggedleft cannot be handled, as
// they are commands not environments. They are furthermore switches that
// can be ended by another switches, but also by commands like \footnote or
// \parbox. So the only safe way is to leave them untouched.
+ // However, we support the pseudo-environments
+ // \begin{centering} ... \end{centering}
+ // \begin{raggedright} ... \end{raggedright}
+ // \begin{raggedleft} ... \end{raggedleft}
+ // since they are used by LyX in floats (for spacing reasons)
else if (name == "center" || name == "centering" ||
- name == "flushleft" || name == "flushright" ||
+ name == "flushleft" || name == "raggedright" ||
+ name == "flushright" || name == "raggedleft" ||
name == "singlespace" || name == "onehalfspace" ||
name == "doublespace" || name == "spacing") {
eat_whitespace(p, os, parent_context, false);
parent_context.check_end_layout(os);
parent_context.new_paragraph(os);
}
- if (name == "flushleft")
+ if (name == "flushleft" || name == "raggedright")
parent_context.add_extra_stuff("\\align left\n");
- else if (name == "flushright")
+ else if (name == "flushright" || name == "raggedleft")
parent_context.add_extra_stuff("\\align right\n");
else if (name == "center" || name == "centering")
parent_context.add_extra_stuff("\\align center\n");
LASSERT(t.cat() == catComment, return);
if (!t.cs().empty()) {
context.check_layout(os);
- output_ert_inset(os, '%' + t.cs(), context);
+ output_comment(p, os, t.cs(), context);
if (p.next_token().cat() == catNewline) {
// A newline after a comment line starts a new
// paragraph
/// get the arguments of a natbib or jurabib citation command
+/// If \p qualified is true, the pre-/postnote arguments are read with
+/// '(' and ')' delimiters instead of the default brackets (used for
+/// biblatex qualified citation lists).
void get_cite_arguments(Parser & p, bool natbibOrder,
- string & before, string & after)
+ string & before, string & after, bool const qualified = false)
{
// We need to distinguish "" and "[]", so we can't use p.getOpt().
// text before the citation
before.clear();
// text after the citation
- after = p.getFullOpt();
+ after = qualified ? p.getFullOpt(false, '(', ')') : p.getFullOpt();
if (!after.empty()) {
- before = p.getFullOpt();
+ before = qualified ? p.getFullOpt(false, '(', ')') : p.getFullOpt();
+ // with a single optional argument natbib puts the note after
+ // the key, jurabib before it; swap to match
if (natbibOrder && !before.empty())
swap(before, after);
}
}
-/// Convert filenames with TeX macros and/or quotes to something LyX
-/// can understand
-string const normalize_filename(string const & name)
-{
- Parser p(name);
- ostringstream os;
- while (p.good()) {
- Token const & t = p.get_token();
- if (t.cat() != catEscape)
- os << t.asInput();
- else if (t.cs() == "lyxdot") {
- // This is used by LyX for simple dots in relative
- // names
- os << '.';
- p.skip_spaces();
- } else if (t.cs() == "space") {
- os << ' ';
- p.skip_spaces();
- } else if (t.cs() == "string") {
- // Convert \string" to " and \string~ to ~
- Token const & n = p.next_token();
- if (n.asInput() != "\"" && n.asInput() != "~")
- os << t.asInput();
- } else
- os << t.asInput();
- }
- // Strip quotes. This is a bit complicated (see latex_path()).
- string full = os.str();
- if (!full.empty() && full[0] == '"') {
- string base = removeExtension(full);
- string ext = getExtension(full);
- if (!base.empty() && base[base.length()-1] == '"')
- // "a b"
- // "a b".tex
- return addExtension(trim(base, "\""), ext);
- if (full[full.length()-1] == '"')
- // "a b.c"
- // "a b.c".tex
- return trim(full, "\"");
- }
- return full;
-}
-
-
-/// Convert \p name from TeX convention (relative to master file) to LyX
-/// convention (relative to .lyx file) if it is relative
-void fix_child_filename(string & name)
-{
- string const absMasterTeX = getMasterFilePath(true);
- bool const isabs = FileName::isAbsolute(name);
- // convert from "relative to .tex master" to absolute original path
- if (!isabs)
- name = makeAbsPath(name, absMasterTeX).absFileName();
- bool copyfile = copyFiles();
- string const absParentLyX = getParentFilePath(false);
- string abs = name;
- if (copyfile) {
- // convert from absolute original path to "relative to master file"
- string const rel = to_utf8(makeRelPath(from_utf8(name),
- from_utf8(absMasterTeX)));
- // re-interpret "relative to .tex file" as "relative to .lyx file"
- // (is different if the master .lyx file resides in a
- // different path than the master .tex file)
- string const absMasterLyX = getMasterFilePath(false);
- abs = makeAbsPath(rel, absMasterLyX).absFileName();
- // Do not copy if the new path is impossible to create. Example:
- // absMasterTeX = "/foo/bar/"
- // absMasterLyX = "/bar/"
- // name = "/baz.eps" => new absolute name would be "/../baz.eps"
- if (contains(name, "/../"))
- copyfile = false;
- }
- if (copyfile) {
- if (isabs)
- name = abs;
- else {
- // convert from absolute original path to
- // "relative to .lyx file"
- name = to_utf8(makeRelPath(from_utf8(abs),
- from_utf8(absParentLyX)));
- }
- }
- else if (!isabs) {
- // convert from absolute original path to "relative to .lyx file"
- name = to_utf8(makeRelPath(from_utf8(name),
- from_utf8(absParentLyX)));
- }
-}
-
-
void copy_file(FileName const & src, string dstname)
{
if (!copyFiles())
dst = FileName(dstname);
else
dst = makeAbsPath(dstname, absParent);
- string const absMaster = getMasterFilePath(false);
FileName const srcpath = src.onlyPath();
FileName const dstpath = dst.onlyPath();
if (equivalent(srcpath, dstpath))
} // anonymous namespace
-void parse_text(Parser & p, ostream & os, unsigned flags, bool outer,
- Context & context)
+/*!
+ * Find a file with basename \p name in path \p path and an extension
+ * in \p extensions.
+ * \returns the name with the first matching extension appended, or an
+ * empty string if no candidate exists on disk.
+ */
+string find_file(string const & name, string const & path,
+ char const * const * extensions)
{
- Layout const * newlayout = 0;
- InsetLayout const * newinsetlayout = 0;
- char const * const * where = 0;
- // Store the latest bibliographystyle, addcontentslineContent and
- // nocite{*} option (needed for bibtex inset)
- string btprint;
- string contentslineContent;
- string bibliographystyle = "default";
- bool const use_natbib = isProvided("natbib");
- bool const use_jurabib = isProvided("jurabib");
- string last_env;
-
- // it is impossible to determine the correct encoding for non-CJK Japanese.
- // Therefore write a note at the beginning of the document
- if (is_nonCJKJapanese) {
- context.check_layout(os);
- begin_inset(os, "Note Note\n");
- os << "status open\n\\begin_layout Plain Layout\n"
- << "\\series bold\n"
- << "Important information:\n"
- << "\\end_layout\n\n"
- << "\\begin_layout Plain Layout\n"
- << "The original LaTeX source for this document is in Japanese (pLaTeX).\n"
- << " It was therefore impossible for tex2lyx to determine the correct encoding.\n"
- << " The iconv encoding " << p.getEncoding() << " was used.\n"
- << " If this is incorrect, you must run the tex2lyx program on the command line\n"
- << " and specify the encoding using the -e command-line switch.\n"
- << " In addition, you might want to double check that the desired output encoding\n"
- << " is correctly selected in Document > Settings > Language.\n"
- << "\\end_layout\n";
- end_inset(os);
- is_nonCJKJapanese = false;
+ // try each extension in order; the first existing file wins
+ for (char const * const * what = extensions; *what; ++what) {
+ string const trial = addExtension(name, *what);
+ if (makeAbsPath(trial, path).exists())
+ return trial;
}
+ return string();
+}
+
+/// Convert filenames with TeX macros and/or quotes to something LyX
+/// can understand
+string const normalize_filename(string const & name)
+{
+ // re-tokenize the name so the helper macros LyX writes into
+ // filenames (\lyxdot, \space, \string) can be expanded
+ Parser p(name);
+ ostringstream os;
while (p.good()) {
Token const & t = p.get_token();
-#ifdef FILEDEBUG
- debugToken(cerr, t, flags);
-#endif
-
- if (flags & FLAG_ITEM) {
- if (t.cat() == catSpace)
- continue;
-
- flags &= ~FLAG_ITEM;
- if (t.cat() == catBegin) {
- // skip the brace and collect everything to the next matching
- // closing brace
- flags |= FLAG_BRACE_LAST;
- continue;
- }
-
- // handle only this single token, leave the loop if done
- flags |= FLAG_LEAVE;
- }
-
- if (t.cat() != catEscape && t.character() == ']' &&
- (flags & FLAG_BRACK_LAST))
- return;
- if (t.cat() == catEnd && (flags & FLAG_BRACE_LAST))
- return;
-
- // If there is anything between \end{env} and \begin{env} we
- // don't need to output a separator.
+ if (t.cat() != catEscape)
+ os << t.asInput();
+ else if (t.cs() == "lyxdot") {
+ // This is used by LyX for simple dots in relative
+ // names
+ os << '.';
+ p.skip_spaces();
+ } else if (t.cs() == "space") {
+ os << ' ';
+ p.skip_spaces();
+ } else if (t.cs() == "string") {
+ // Convert \string" to " and \string~ to ~
+ Token const & n = p.next_token();
+ if (n.asInput() != "\"" && n.asInput() != "~")
+ os << t.asInput();
+ } else
+ os << t.asInput();
+ }
+ // Strip quotes. This is a bit complicated (see latex_path()).
+ string full = os.str();
+ if (!full.empty() && full[0] == '"') {
+ string base = removeExtension(full);
+ string ext = getExtension(full);
+ if (!base.empty() && base[base.length()-1] == '"')
+ // "a b"
+ // "a b".tex
+ return addExtension(trim(base, "\""), ext);
+ if (full[full.length()-1] == '"')
+ // "a b.c"
+ // "a b.c".tex
+ return trim(full, "\"");
+ }
+ // not quoted (or unbalanced quotes): return as-is
+ return full;
+}
+
+
+/// Convert \p name from TeX convention (relative to master file) to LyX
+/// convention (relative to .lyx file) if it is relative
+void fix_child_filename(string & name)
+{
+ string const absMasterTeX = getMasterFilePath(true);
+ bool const isabs = FileName::isAbsolute(name);
+ // convert from "relative to .tex master" to absolute original path
+ if (!isabs)
+ name = makeAbsPath(name, absMasterTeX).absFileName();
+ bool copyfile = copyFiles();
+ string const absParentLyX = getParentFilePath(false);
+ // absolute path of \p name; recomputed below in the copy-files case
+ string abs = name;
+ if (copyfile) {
+ // convert from absolute original path to "relative to master file"
+ string const rel = to_utf8(makeRelPath(from_utf8(name),
+ from_utf8(absMasterTeX)));
+ // re-interpret "relative to .tex file" as "relative to .lyx file"
+ // (is different if the master .lyx file resides in a
+ // different path than the master .tex file)
+ string const absMasterLyX = getMasterFilePath(false);
+ abs = makeAbsPath(rel, absMasterLyX).absFileName();
+ // Do not copy if the new path is impossible to create. Example:
+ // absMasterTeX = "/foo/bar/"
+ // absMasterLyX = "/bar/"
+ // name = "/baz.eps" => new absolute name would be "/../baz.eps"
+ if (contains(name, "/../"))
+ copyfile = false;
+ }
+ if (copyfile) {
+ if (isabs)
+ name = abs;
+ else {
+ // convert from absolute original path to
+ // "relative to .lyx file"
+ name = to_utf8(makeRelPath(from_utf8(abs),
+ from_utf8(absParentLyX)));
+ }
+ }
+ else if (!isabs) {
+ // convert from absolute original path to "relative to .lyx file"
+ name = to_utf8(makeRelPath(from_utf8(name),
+ from_utf8(absParentLyX)));
+ }
+}
+
+
+void parse_text(Parser & p, ostream & os, unsigned flags, bool outer,
+ Context & context)
+{
+ Layout const * newlayout = 0;
+ InsetLayout const * newinsetlayout = 0;
+ char const * const * where = 0;
+ // Store the latest bibliographystyle, addcontentslineContent and
+ // nocite{*} option (needed for bibtex inset)
+ string btprint;
+ string contentslineContent;
+ string bibliographystyle = "default";
+ bool const use_natbib = isProvided("natbib");
+ bool const use_jurabib = isProvided("jurabib");
+ bool const use_biblatex = isProvided("biblatex")
+ && preamble.citeEngine() != "biblatex-natbib";
+ bool const use_biblatex_natbib = isProvided("biblatex-natbib")
+ || (isProvided("biblatex") && preamble.citeEngine() == "biblatex-natbib");
+ need_commentbib = use_biblatex || use_biblatex_natbib;
+ string last_env;
+
+ // it is impossible to determine the correct encoding for non-CJK Japanese.
+ // Therefore write a note at the beginning of the document
+ if (is_nonCJKJapanese) {
+ context.check_layout(os);
+ begin_inset(os, "Note Note\n");
+ os << "status open\n\\begin_layout Plain Layout\n"
+ << "\\series bold\n"
+ << "Important information:\n"
+ << "\\end_layout\n\n"
+ << "\\begin_layout Plain Layout\n"
+ << "The original LaTeX source for this document is in Japanese (pLaTeX).\n"
+ << " It was therefore impossible for tex2lyx to determine the correct encoding.\n"
+ << " The iconv encoding " << p.getEncoding() << " was used.\n"
+ << " If this is incorrect, you must run the tex2lyx program on the command line\n"
+ << " and specify the encoding using the -e command-line switch.\n"
+ << " In addition, you might want to double check that the desired output encoding\n"
+ << " is correctly selected in Document > Settings > Language.\n"
+ << "\\end_layout\n";
+ end_inset(os);
+ is_nonCJKJapanese = false;
+ }
+
+ bool have_cycled = false;
+ while (p.good()) {
+ // Leave here only after at least one cycle
+ if (have_cycled && flags & FLAG_LEAVE) {
+ flags &= ~FLAG_LEAVE;
+ break;
+ }
+
+ Token const & t = p.get_token();
+#ifdef FILEDEBUG
+ debugToken(cerr, t, flags);
+#endif
+
+ if (flags & FLAG_ITEM) {
+ if (t.cat() == catSpace)
+ continue;
+
+ flags &= ~FLAG_ITEM;
+ if (t.cat() == catBegin) {
+ // skip the brace and collect everything to the next matching
+ // closing brace
+ flags |= FLAG_BRACE_LAST;
+ continue;
+ }
+
+ // handle only this single token, leave the loop if done
+ flags |= FLAG_LEAVE;
+ }
+
+ if (t.cat() != catEscape && t.character() == ']' &&
+ (flags & FLAG_BRACK_LAST))
+ return;
+ if (t.cat() == catEnd && (flags & FLAG_BRACE_LAST))
+ return;
+
+ // If there is anything between \end{env} and \begin{env} we
+ // don't need to output a separator.
if (t.cat() != catSpace && t.cat() != catNewline &&
t.asInput() != "\\begin")
last_env = "";
//
// cat codes
//
+ have_cycled = true;
bool const starred = p.next_token().asInput() == "*";
string const starredname(starred ? (t.cs() + '*') : t.cs());
if (t.cat() == catMath) {
// output, but looks ugly in LyX.
eat_whitespace(p, os, context, false);
}
+ continue;
}
- else if (t.cat() == catSuper || t.cat() == catSub)
+ if (t.cat() == catSuper || t.cat() == catSub) {
cerr << "catcode " << t << " illegal in text mode\n";
+ continue;
+ }
// Basic support for english quotes. This should be
// extended to other quotes, but is not so easy (a
// left english quote is the same as a right german
// quote...)
- else if (t.asInput() == "`" && p.next_token().asInput() == "`") {
+ if (t.asInput() == "`" && p.next_token().asInput() == "`") {
context.check_layout(os);
begin_inset(os, "Quotes ");
os << "eld";
end_inset(os);
p.get_token();
skip_braces(p);
+ continue;
}
- else if (t.asInput() == "'" && p.next_token().asInput() == "'") {
+ if (t.asInput() == "'" && p.next_token().asInput() == "'") {
context.check_layout(os);
begin_inset(os, "Quotes ");
os << "erd";
end_inset(os);
p.get_token();
skip_braces(p);
+ continue;
}
- else if (t.asInput() == ">" && p.next_token().asInput() == ">") {
+ if (t.asInput() == ">" && p.next_token().asInput() == ">") {
context.check_layout(os);
begin_inset(os, "Quotes ");
os << "ald";
end_inset(os);
p.get_token();
skip_braces(p);
+ continue;
}
- else if (t.asInput() == "<"
+ if (t.asInput() == "<"
&& p.next_token().asInput() == "<") {
bool has_chunk = false;
if (noweb_mode) {
p.get_token();
skip_braces(p);
}
+ continue;
}
- else if (t.cat() == catSpace || (t.cat() == catNewline && ! p.isParagraph()))
+ if (t.cat() == catSpace || (t.cat() == catNewline && ! p.isParagraph())) {
check_space(p, os, context);
+ continue;
+ }
- else if (t.character() == '[' && noweb_mode &&
+ // babel shorthands (also used by polyglossia)
+ // Since these can have different meanings for different languages
+ // we import them as ERT (but they must be put in ERT to get output
+ // verbatim).
+ if (t.asInput() == "\"") {
+ string s = "\"";
+ // These are known pairs. We put them together in
+ // one ERT inset. In other cases (such as "a), only
+ // the quotation mark is ERTed.
+ if (p.next_token().asInput() == "\""
+ || p.next_token().asInput() == "|"
+ || p.next_token().asInput() == "-"
+ || p.next_token().asInput() == "~"
+ || p.next_token().asInput() == "="
+ || p.next_token().asInput() == "/"
+ || p.next_token().asInput() == "~"
+ || p.next_token().asInput() == "'"
+ || p.next_token().asInput() == "`"
+ || p.next_token().asInput() == "<"
+ || p.next_token().asInput() == ">") {
+ s += p.next_token().asInput();
+ p.get_token();
+ }
+ output_ert_inset(os, s, context);
+ continue;
+ }
+
+ if (t.character() == '[' && noweb_mode &&
p.next_token().character() == '[') {
// These can contain underscores
p.putback();
cerr << "Warning: Inserting missing ']' in '"
<< s << "'." << endl;
output_ert_inset(os, s, context);
+ continue;
}
- else if (t.cat() == catLetter) {
+ if (t.cat() == catLetter) {
context.check_layout(os);
os << t.cs();
+ continue;
}
- else if (t.cat() == catOther ||
+ if (t.cat() == catOther ||
t.cat() == catAlign ||
t.cat() == catParameter) {
context.check_layout(os);
} else
// This translates "&" to "\\&" which may be wrong...
os << t.cs();
+ continue;
}
- else if (p.isParagraph()) {
- if (context.new_layout_allowed)
- context.new_paragraph(os);
- else
- output_ert_inset(os, "\\par ", context);
+ if (p.isParagraph()) {
+ // In minted floating listings we will collect
+ // everything into the caption, where multiple
+ // paragraphs are forbidden.
+ if (minted_float.empty()) {
+ if (context.new_layout_allowed)
+ context.new_paragraph(os);
+ else
+ output_ert_inset(os, "\\par ", context);
+ } else
+ os << ' ';
eat_whitespace(p, os, context, true);
+ continue;
}
- else if (t.cat() == catActive) {
+ if (t.cat() == catActive) {
context.check_layout(os);
if (t.character() == '~') {
if (context.layout->free_spacing)
}
} else
os << t.cs();
+ continue;
}
- else if (t.cat() == catBegin) {
+ if (t.cat() == catBegin) {
Token const next = p.next_token();
Token const end = p.next_next_token();
if (next.cat() == catEnd) {
output_ert_inset(os, "}", context);
}
}
+ continue;
}
- else if (t.cat() == catEnd) {
+ if (t.cat() == catEnd) {
if (flags & FLAG_BRACE_LAST) {
return;
}
cerr << "stray '}' in text\n";
output_ert_inset(os, "}", context);
+ continue;
}
- else if (t.cat() == catComment)
+ if (t.cat() == catComment) {
parse_comment(p, os, t, context);
+ continue;
+ }
//
// control sequences
//
- else if (t.cs() == "(" || t.cs() == "[") {
+ if (t.cs() == "(" || t.cs() == "[") {
bool const simple = t.cs() == "(";
context.check_layout(os);
begin_inset(os, "Formula");
// output, but looks ugly in LyX.
eat_whitespace(p, os, context, false);
}
+ continue;
}
- else if (t.cs() == "begin")
+ if (t.cs() == "begin") {
parse_environment(p, os, outer, last_env,
context);
+ continue;
+ }
- else if (t.cs() == "end") {
+ if (t.cs() == "end") {
if (flags & FLAG_END) {
// eat environment name
string const name = p.getArg('{', '}');
return;
}
p.error("found 'end' unexpectedly");
+ continue;
}
- else if (t.cs() == "item") {
+ if (t.cs() == "item") {
string s;
bool const optarg = p.hasOpt();
if (optarg) {
eat_whitespace(p, os, context, false);
}
}
+ continue;
}
- else if (t.cs() == "bibitem") {
+ if (t.cs() == "bibitem") {
context.set_item();
context.check_layout(os);
eat_whitespace(p, os, context, false);
} else {
begin_command_inset(os, "bibitem", "bibitem");
os << "label \"" << label << "\"\n"
- "key \"" << key << "\"\n";
+ << "key \"" << key << "\"\n"
+ << "literal \"true\"\n";
end_inset(os);
}
+ continue;
}
- else if (is_macro(p)) {
+ if (is_macro(p)) {
// catch the case of \def\inputGnumericTable
bool macro = true;
if (t.cs() == "def") {
}
if (macro)
parse_macro(p, os, context);
+ continue;
}
- else if (t.cs() == "noindent") {
+ if (t.cs() == "noindent") {
p.skip_spaces();
context.add_par_extra_stuff("\\noindent\n");
+ continue;
}
- else if (t.cs() == "appendix") {
+ if (t.cs() == "appendix") {
context.add_par_extra_stuff("\\start_of_appendix\n");
// We need to start a new paragraph. Otherwise the
// appendix in 'bla\appendix\chapter{' would start
context.check_layout(os);
// FIXME: This is a hack to prevent paragraph
// deletion if it is empty. Handle this better!
- output_ert_inset(os,
- "%dummy comment inserted by tex2lyx to "
+ output_comment(p, os,
+ "dummy comment inserted by tex2lyx to "
"ensure that this paragraph is not empty",
context);
// Both measures above may generate an additional
// empty paragraph, but that does not hurt, because
// whitespace does not matter here.
eat_whitespace(p, os, context, true);
+ continue;
}
// Must catch empty dates before findLayout is called below
- else if (t.cs() == "date") {
+ if (t.cs() == "date") {
eat_whitespace(p, os, context, false);
p.pushPosition();
string const date = p.verbatim_item();
"\\date{" + p.verbatim_item() + '}',
context);
}
+ continue;
}
// Starred section headings
// Must attempt to parse "Section*" before "Section".
- else if ((p.next_token().asInput() == "*") &&
+ if ((p.next_token().asInput() == "*") &&
context.new_layout_allowed &&
(newlayout = findLayout(context.textclass, t.cs() + '*', true))) {
// write the layout
set<string> const & req = newlayout->requires();
for (set<string>::const_iterator it = req.begin(); it != req.end(); ++it)
preamble.registerAutomaticallyLoadedPackage(*it);
+ continue;
}
// Section headings and the like
- else if (context.new_layout_allowed &&
+ if (context.new_layout_allowed &&
(newlayout = findLayout(context.textclass, t.cs(), true))) {
// write the layout
output_command_layout(os, p, outer, context, newlayout);
set<string> const & req = newlayout->requires();
for (set<string>::const_iterator it = req.begin(); it != req.end(); ++it)
preamble.registerAutomaticallyLoadedPackage(*it);
+ continue;
}
- else if (t.cs() == "subfloat") {
+ if (t.cs() == "subfloat") {
// the syntax is \subfloat[list entry][sub caption]{content}
// if it is a table of figure depends on the surrounding float
- // FIXME: second optional argument is not parsed
- bool has_caption = false;
p.skip_spaces();
// do nothing if there is no outer float
if (!float_type.empty()) {
<< "\nstatus collapsed\n\n";
// test for caption
string caption;
+ bool has_caption = false;
if (p.next_token().cat() != catEscape &&
p.next_token().character() == '[') {
p.get_token(); // eat '['
caption = parse_text_snippet(p, FLAG_BRACK_LAST, outer, context);
has_caption = true;
}
+ // In case we have two optional args, the second is the caption.
+ if (p.next_token().cat() != catEscape &&
+ p.next_token().character() == '[') {
+ p.get_token(); // eat '['
+ caption = parse_text_snippet(p, FLAG_BRACK_LAST, outer, context);
+ }
// the content
parse_text_in_inset(p, os, FLAG_ITEM, outer, context);
// the caption comes always as the last
newcontext.check_layout(os);
os << caption << "\n";
newcontext.check_end_layout(os);
- // We don't need really a new paragraph, but
- // we must make sure that the next item gets a \begin_layout.
- //newcontext.new_paragraph(os);
end_inset(os);
p.skip_spaces();
+ // close the layout we opened
+ os << "\n\\end_layout";
}
- // We don't need really a new paragraph, but
- // we must make sure that the next item gets a \begin_layout.
- if (has_caption)
- context.new_paragraph(os);
end_inset(os);
p.skip_spaces();
- context.check_end_layout(os);
- // close the layout we opened
- if (has_caption)
- os << "\n\\end_layout\n";
} else {
// if the float type is not supported or there is no surrounding float
// output it as ERT
+ string opt_arg1;
+ string opt_arg2;
if (p.hasOpt()) {
- string opt_arg = convert_command_inset_arg(p.getArg('[', ']'));
- output_ert_inset(os, t.asInput() + '[' + opt_arg +
- "]{" + p.verbatim_item() + '}', context);
- } else
- output_ert_inset(os, t.asInput() + "{" + p.verbatim_item() + '}', context);
+ opt_arg1 = convert_command_inset_arg(p.getFullOpt());
+ if (p.hasOpt())
+ opt_arg2 = convert_command_inset_arg(p.getFullOpt());
+ }
+ output_ert_inset(os, t.asInput() + opt_arg1 + opt_arg2
+ + "{" + p.verbatim_item() + '}', context);
}
+ continue;
}
- else if (t.cs() == "includegraphics") {
+ if (t.cs() == "includegraphics") {
bool const clip = p.next_token().asInput() == "*";
if (clip)
p.get_token();
// Check whether some option was given twice.
end_inset(os);
preamble.registerAutomaticallyLoadedPackage("graphicx");
+ continue;
}
- else if (t.cs() == "footnote" ||
+ if (t.cs() == "footnote" ||
(t.cs() == "thanks" && context.layout->intitle)) {
p.skip_spaces();
context.check_layout(os);
os << "status collapsed\n\n";
parse_text_in_inset(p, os, FLAG_ITEM, false, context);
end_inset(os);
+ continue;
}
- else if (t.cs() == "marginpar") {
+ if (t.cs() == "marginpar") {
p.skip_spaces();
context.check_layout(os);
begin_inset(os, "Marginal\n");
os << "status collapsed\n\n";
parse_text_in_inset(p, os, FLAG_ITEM, false, context);
end_inset(os);
+ continue;
}
- else if (t.cs() == "lstinline") {
+ if (t.cs() == "lstinline" || t.cs() == "mintinline") {
+ bool const use_minted = t.cs() == "mintinline";
p.skip_spaces();
- parse_listings(p, os, context, true);
+ parse_listings(p, os, context, true, use_minted);
+ continue;
}
- else if (t.cs() == "ensuremath") {
+ if (t.cs() == "ensuremath") {
p.skip_spaces();
context.check_layout(os);
string const s = p.verbatim_item();
else
output_ert_inset(os, "\\ensuremath{" + s + "}",
context);
+ continue;
}
else if (t.cs() == "makeindex" || t.cs() == "maketitle") {
skip_spaces_braces(p);
} else
output_ert_inset(os, t.asInput(), context);
+ continue;
}
- else if (t.cs() == "tableofcontents" || t.cs() == "lstlistoflistings") {
+ if (t.cs() == "tableofcontents"
+ || t.cs() == "lstlistoflistings"
+ || t.cs() == "listoflistings") {
+ string name = t.cs();
+ if (preamble.minted() && name == "listoflistings")
+ name.insert(0, "lst");
context.check_layout(os);
- begin_command_inset(os, "toc", t.cs());
+ begin_command_inset(os, "toc", name);
end_inset(os);
skip_spaces_braces(p);
- if (t.cs() == "lstlistoflistings")
- preamble.registerAutomaticallyLoadedPackage("listings");
+ if (name == "lstlistoflistings") {
+ if (preamble.minted())
+ preamble.registerAutomaticallyLoadedPackage("minted");
+ else
+ preamble.registerAutomaticallyLoadedPackage("listings");
+ }
+ continue;
}
- else if (t.cs() == "listoffigures" || t.cs() == "listoftables") {
+ if (t.cs() == "listoffigures" || t.cs() == "listoftables") {
context.check_layout(os);
if (t.cs() == "listoffigures")
begin_inset(os, "FloatList figure\n");
begin_inset(os, "FloatList table\n");
end_inset(os);
skip_spaces_braces(p);
+ continue;
}
- else if (t.cs() == "listof") {
+ if (t.cs() == "listof") {
p.skip_spaces(true);
string const name = p.get_token().cs();
if (context.textclass.floats().typeExist(name)) {
p.get_token(); // swallow second arg
} else
output_ert_inset(os, "\\listof{" + name + "}", context);
+ continue;
}
- else if ((where = is_known(t.cs(), known_text_font_families)))
+ if ((where = is_known(t.cs(), known_text_font_families))) {
parse_text_attributes(p, os, FLAG_ITEM, outer,
context, "\\family", context.font.family,
known_coded_font_families[where - known_text_font_families]);
+ continue;
+ }
- else if ((where = is_known(t.cs(), known_text_font_series)))
+ if ((where = is_known(t.cs(), known_text_font_series))) {
parse_text_attributes(p, os, FLAG_ITEM, outer,
context, "\\series", context.font.series,
known_coded_font_series[where - known_text_font_series]);
+ continue;
+ }
- else if ((where = is_known(t.cs(), known_text_font_shapes)))
+ if ((where = is_known(t.cs(), known_text_font_shapes))) {
parse_text_attributes(p, os, FLAG_ITEM, outer,
context, "\\shape", context.font.shape,
known_coded_font_shapes[where - known_text_font_shapes]);
+ continue;
+ }
- else if (t.cs() == "textnormal" || t.cs() == "normalfont") {
+ if (t.cs() == "textnormal" || t.cs() == "normalfont") {
context.check_layout(os);
TeXFont oldFont = context.font;
context.font.init();
context.font = oldFont;
} else
eat_whitespace(p, os, context, false);
+ continue;
}
- else if (t.cs() == "textcolor") {
+ if (t.cs() == "textcolor") {
// scheme is \textcolor{color name}{text}
string const color = p.verbatim_item();
// we support the predefined colors of the color and the xcolor package
} else
// for custom defined colors
output_ert_inset(os, t.asInput() + "{" + color + "}", context);
+ continue;
}
- else if (t.cs() == "underbar" || t.cs() == "uline") {
+ if (t.cs() == "underbar" || t.cs() == "uline") {
// \underbar is not 100% correct (LyX outputs \uline
// of ulem.sty). The difference is that \ulem allows
// line breaks, and \underbar does not.
context.check_layout(os);
os << "\n\\bar default\n";
preamble.registerAutomaticallyLoadedPackage("ulem");
+ continue;
}
- else if (t.cs() == "sout") {
+ if (t.cs() == "sout") {
context.check_layout(os);
os << "\n\\strikeout on\n";
parse_text_snippet(p, os, FLAG_ITEM, outer, context);
context.check_layout(os);
os << "\n\\strikeout default\n";
preamble.registerAutomaticallyLoadedPackage("ulem");
+ continue;
}
- else if (t.cs() == "uuline" || t.cs() == "uwave" ||
- t.cs() == "emph" || t.cs() == "noun") {
+ if (t.cs() == "uuline" || t.cs() == "uwave"
+ || t.cs() == "emph" || t.cs() == "noun"
+ || t.cs() == "xout") {
context.check_layout(os);
os << "\n\\" << t.cs() << " on\n";
parse_text_snippet(p, os, FLAG_ITEM, outer, context);
context.check_layout(os);
os << "\n\\" << t.cs() << " default\n";
- if (t.cs() == "uuline" || t.cs() == "uwave")
+ if (t.cs() == "uuline" || t.cs() == "uwave" || t.cs() == "xout")
preamble.registerAutomaticallyLoadedPackage("ulem");
+ continue;
}
- else if (t.cs() == "lyxadded" || t.cs() == "lyxdeleted") {
+ if (t.cs() == "lyxadded" || t.cs() == "lyxdeleted") {
context.check_layout(os);
string name = p.getArg('{', '}');
string localtime = p.getArg('{', '}');
preamble.registerAutomaticallyLoadedPackage("xcolor");
}
}
+ continue;
}
- else if (t.cs() == "textipa") {
+ if (t.cs() == "textipa") {
context.check_layout(os);
begin_inset(os, "IPA\n");
bool merging_hyphens_allowed = context.merging_hyphens_allowed;
end_inset(os);
preamble.registerAutomaticallyLoadedPackage("tipa");
preamble.registerAutomaticallyLoadedPackage("tipx");
+ continue;
}
- else if (t.cs() == "texttoptiebar" || t.cs() == "textbottomtiebar") {
+ if (t.cs() == "texttoptiebar" || t.cs() == "textbottomtiebar") {
context.check_layout(os);
begin_inset(os, "IPADeco " + t.cs().substr(4) + "\n");
os << "status open\n";
parse_text_in_inset(p, os, FLAG_ITEM, outer, context);
end_inset(os);
p.skip_spaces();
+ continue;
}
- else if (t.cs() == "textvertline") {
+ if (t.cs() == "textvertline") {
// FIXME: This is not correct, \textvertline is higher than |
os << "|";
skip_braces(p);
continue;
}
- else if (t.cs() == "tone" ) {
+ if (t.cs() == "tone" ) {
context.check_layout(os);
// register the tone package
preamble.registerAutomaticallyLoadedPackage("tone");
} else
// we did not find a non-ert version
output_ert_inset(os, command, context);
+ continue;
}
- else if (t.cs() == "phantom" || t.cs() == "hphantom" ||
+ if (t.cs() == "phantom" || t.cs() == "hphantom" ||
t.cs() == "vphantom") {
context.check_layout(os);
if (t.cs() == "phantom")
parse_text_in_inset(p, os, FLAG_ITEM, outer, context,
"Phantom");
end_inset(os);
+ continue;
}
- else if (t.cs() == "href") {
+ if (t.cs() == "href") {
context.check_layout(os);
string target = convert_command_inset_arg(p.verbatim_item());
string name = convert_command_inset_arg(p.verbatim_item());
type = target.substr(0, i + 1);
if (type == "mailto:" || type == "file:")
target = target.substr(i + 1);
- // handle the case that name is equal to target, except of "http://"
- else if (target.substr(i + 3) == name && type == "http:")
+ // handle the case that name is equal to target, except for "http(s)://"
+ else if (target.substr(i + 3) == name && (type == "http:" || type == "https:"))
target = name;
}
begin_command_inset(os, "href", "href");
os << "target \"" << target << "\"\n";
if (type == "mailto:" || type == "file:")
os << "type \"" << type << "\"\n";
+ os << "literal \"true\"\n";
end_inset(os);
skip_spaces_braces(p);
+ continue;
}
- else if (t.cs() == "lyxline") {
+ if (t.cs() == "lyxline") {
// swallow size argument (it is not used anyway)
p.getArg('{', '}');
if (!context.atParagraphStart()) {
"width \"100line%\"\n"
"height \"1pt\"\n";
end_inset(os);
+ continue;
}
- else if (t.cs() == "rule") {
+ if (t.cs() == "rule") {
string const offset = (p.hasOpt() ? p.getArg('[', ']') : string());
string const width = p.getArg('{', '}');
string const thickness = p.getArg('{', '}');
os << "width \"" << translate_len(width) << "\"\n"
"height \"" << translate_len(thickness) << "\"\n";
end_inset(os);
+ continue;
}
// handle refstyle first to catch \eqref which can also occur
// without refstyle. Only recognize these commands if
// refstyle.sty was found in the preamble (otherwise \eqref
// and user defined ref commands could be misdetected).
- else if ((where = is_known(t.cs(), known_refstyle_commands)) &&
- preamble.refstyle()) {
+ if ((where = is_known(t.cs(), known_refstyle_commands))
+ && preamble.refstyle()) {
context.check_layout(os);
begin_command_inset(os, "ref", "formatted");
os << "reference \"";
<< ":";
os << convert_command_inset_arg(p.verbatim_item())
<< "\"\n";
+ os << "plural \"false\"\n";
+ os << "caps \"false\"\n";
+ os << "noprefix \"false\"\n";
end_inset(os);
preamble.registerAutomaticallyLoadedPackage("refstyle");
+ continue;
}
// if refstyle is used, we must not convert \prettyref to a
// formatted reference, since that would result in a refstyle command.
- else if ((where = is_known(t.cs(), known_ref_commands)) &&
+ if ((where = is_known(t.cs(), known_ref_commands)) &&
(t.cs() != "prettyref" || !preamble.refstyle())) {
string const opt = p.getOpt();
if (opt.empty()) {
os << "reference \""
<< convert_command_inset_arg(p.verbatim_item())
<< "\"\n";
+ os << "plural \"false\"\n";
+ os << "caps \"false\"\n";
+ os << "noprefix \"false\"\n";
end_inset(os);
if (t.cs() == "vref" || t.cs() == "vpageref")
preamble.registerAutomaticallyLoadedPackage("varioref");
output_ert_inset(os, t.asInput() + '[' + opt + "]{" +
p.verbatim_item() + '}', context);
}
+ continue;
}
- else if (use_natbib &&
+ if (use_natbib &&
is_known(t.cs(), known_natbib_commands) &&
((t.cs() != "citefullauthor" &&
t.cs() != "citeyear" &&
os << "before " << '"' << before << '"' << "\n";
os << "key \""
<< convert_command_inset_arg(p.verbatim_item())
- << "\"\n";
+ << "\"\n"
+ << "literal \"true\"\n";
end_inset(os);
// Need to set the cite engine if natbib is loaded by
// the document class directly
if (preamble.citeEngine() == "basic")
preamble.citeEngine("natbib");
+ continue;
+ }
+
+ if ((use_biblatex
+ && is_known(t.cs(), known_biblatex_commands)
+ && ((t.cs() == "cite"
+ || t.cs() == "citeauthor"
+ || t.cs() == "Citeauthor"
+ || t.cs() == "parencite"
+ || t.cs() == "citetitle")
+ || p.next_token().asInput() != "*"))
+ || (use_biblatex_natbib
+ && (is_known(t.cs(), known_biblatex_commands)
+ || is_known(t.cs(), known_natbib_commands))
+ && ((t.cs() == "cite" || t.cs() == "citet" || t.cs() == "Citet"
+ || t.cs() == "citep" || t.cs() == "Citep" || t.cs() == "citealt"
+ || t.cs() == "Citealt" || t.cs() == "citealp" || t.cs() == "Citealp"
+ || t.cs() == "citeauthor" || t.cs() == "Citeauthor"
+ || t.cs() == "parencite" || t.cs() == "citetitle")
+ || p.next_token().asInput() != "*"))){
+ context.check_layout(os);
+ string command = t.cs();
+ if (p.next_token().asInput() == "*") {
+ command += '*';
+ p.get_token();
+ }
+
+ bool const qualified = suffixIs(command, "s");
+ if (qualified)
+ command = rtrim(command, "s");
+
+ // text before the citation
+ string before;
+ // text after the citation
+ string after;
+ get_cite_arguments(p, true, before, after, qualified);
+
+ // These use natbib cmd names in LyX
+ // for inter-citeengine compatibility
+ if (command == "citeyear")
+ command = "citebyear";
+ else if (command == "cite*")
+ command = "citeyear";
+ else if (command == "textcite")
+ command = "citet";
+ else if (command == "Textcite")
+ command = "Citet";
+ else if (command == "parencite")
+ command = "citep";
+ else if (command == "Parencite")
+ command = "Citep";
+ else if (command == "parencite*")
+ command = "citeyearpar";
+ else if (command == "smartcite")
+ command = "footcite";
+ else if (command == "Smartcite")
+ command = "Footcite";
+
+ string const emptyarg = qualified ? "()" : "[]";
+ if (before.empty() && after == emptyarg)
+ // avoid \cite[]{a}
+ after.erase();
+ else if (before == emptyarg && after == emptyarg) {
+ // avoid \cite[][]{a}
+ before.erase();
+ after.erase();
+ }
+ // remove the brackets around after and before
+ if (!after.empty()) {
+ after.erase(0, 1);
+ after.erase(after.length() - 1, 1);
+ after = convert_command_inset_arg(after);
+ }
+ if (!before.empty()) {
+ before.erase(0, 1);
+ before.erase(before.length() - 1, 1);
+ before = convert_command_inset_arg(before);
+ }
+ string keys, pretextlist, posttextlist;
+ if (qualified) {
+ map<string, string> pres;
+ map<string, string> posts;
+ vector<string> lkeys;
+ // text before the citation
+ string lbefore;
+ // text after the citation
+ string lafter;
+ string lkey;
+ while (true) {
+ get_cite_arguments(p, true, lbefore, lafter);
+ // remove the brackets around after and before
+ if (!lafter.empty()) {
+ lafter.erase(0, 1);
+ lafter.erase(lafter.length() - 1, 1);
+ lafter = convert_command_inset_arg(lafter);
+ }
+ if (!lbefore.empty()) {
+ lbefore.erase(0, 1);
+ lbefore.erase(lbefore.length() - 1, 1);
+ lbefore = convert_command_inset_arg(lbefore);
+ }
+ if (lbefore.empty() && lafter == "[]")
+ // avoid \cite[]{a}
+ lafter.erase();
+ else if (lbefore == "[]" && lafter == "[]") {
+ // avoid \cite[][]{a}
+ lbefore.erase();
+ lafter.erase();
+ }
+ lkey = p.getArg('{', '}');
+ if (lkey.empty())
+ break;
+ if (!lbefore.empty())
+ pres.insert(make_pair(lkey, lbefore));
+ if (!lafter.empty())
+ posts.insert(make_pair(lkey, lafter));
+ lkeys.push_back(lkey);
+ }
+ keys = convert_command_inset_arg(getStringFromVector(lkeys));
+ for (auto const & ptl : pres) {
+ if (!pretextlist.empty())
+ pretextlist += '\t';
+ pretextlist += ptl.first + " " + ptl.second;
+ }
+ for (auto const & potl : posts) {
+ if (!posttextlist.empty())
+ posttextlist += '\t';
+ posttextlist += potl.first + " " + potl.second;
+ }
+ } else
+ keys = convert_command_inset_arg(p.verbatim_item());
+ begin_command_inset(os, "citation", command);
+ os << "after " << '"' << after << '"' << "\n";
+ os << "before " << '"' << before << '"' << "\n";
+ os << "key \""
+ << keys
+ << "\"\n";
+ if (!pretextlist.empty())
+ os << "pretextlist " << '"' << pretextlist << '"' << "\n";
+ if (!posttextlist.empty())
+ os << "posttextlist " << '"' << posttextlist << '"' << "\n";
+ os << "literal \"true\"\n";
+ end_inset(os);
+ // Need to set the cite engine if biblatex is loaded by
+ // the document class directly
+ if (preamble.citeEngine() == "basic")
+ use_biblatex_natbib ?
+ preamble.citeEngine("biblatex-natbib")
+ : preamble.citeEngine("biblatex");
+ continue;
}
- else if (use_jurabib &&
+ if (use_jurabib &&
is_known(t.cs(), known_jurabib_commands) &&
(t.cs() == "cite" || p.next_token().asInput() != "*")) {
context.check_layout(os);
before.erase(before.length() - 1, 1);
}
begin_command_inset(os, "citation", command);
- os << "after " << '"' << after << '"' << "\n";
- os << "before " << '"' << before << '"' << "\n";
- os << "key " << '"' << citation << '"' << "\n";
+ os << "after " << '"' << after << "\"\n"
+ << "before " << '"' << before << "\"\n"
+ << "key " << '"' << citation << "\"\n"
+ << "literal \"true\"\n";
end_inset(os);
// Need to set the cite engine if jurabib is loaded by
// the document class directly
if (preamble.citeEngine() == "basic")
preamble.citeEngine("jurabib");
+ continue;
}
- else if (t.cs() == "cite"
+ if (t.cs() == "cite"
|| t.cs() == "nocite") {
context.check_layout(os);
string after = convert_command_inset_arg(p.getArg('[', ']'));
// the BibTeX inset
if (key != "*") {
begin_command_inset(os, "citation", t.cs());
- os << "after " << '"' << after << '"' << "\n";
- os << "key " << '"' << key << '"' << "\n";
+ os << "after " << '"' << after << "\"\n"
+ << "key " << '"' << key << "\"\n"
+ << "literal \"true\"\n";
end_inset(os);
} else if (t.cs() == "nocite")
btprint = key;
+ continue;
}
- else if (t.cs() == "index" ||
- (t.cs() == "sindex" && preamble.use_indices() == "true")) {
+ if (t.cs() == "index" ||
+ (t.cs() == "sindex" && preamble.use_indices() == "true")) {
context.check_layout(os);
string const arg = (t.cs() == "sindex" && p.hasOpt()) ?
p.getArg('[', ']') : "";
end_inset(os);
if (kind != "idx")
preamble.registerAutomaticallyLoadedPackage("splitidx");
+ continue;
}
- else if (t.cs() == "nomenclature") {
+ if (t.cs() == "nomenclature") {
context.check_layout(os);
begin_command_inset(os, "nomenclature", "nomenclature");
string prefix = convert_command_inset_arg(p.getArg('[', ']'));
<< convert_command_inset_arg(p.verbatim_item());
os << "\"\ndescription \""
<< convert_command_inset_arg(p.verbatim_item())
- << "\"\n";
+ << "\"\n"
+ << "literal \"true\"\n";
end_inset(os);
preamble.registerAutomaticallyLoadedPackage("nomencl");
+ continue;
}
- else if (t.cs() == "label") {
+ if (t.cs() == "label") {
context.check_layout(os);
begin_command_inset(os, "label", "label");
os << "name \""
<< convert_command_inset_arg(p.verbatim_item())
<< "\"\n";
end_inset(os);
+ continue;
+ }
+
+ if (t.cs() == "lyxmintcaption") {
+ string const pos = p.getArg('[', ']');
+ if (pos == "t") {
+ string const caption =
+ parse_text_snippet(p, FLAG_ITEM, false,
+ context);
+ minted_nonfloat_caption = "[t]" + caption;
+ } else {
+ // We already got the caption at the bottom,
+ // so simply skip it.
+ parse_text_snippet(p, FLAG_ITEM, false, context);
+ }
+ continue;
}
- else if (t.cs() == "printindex" || t.cs() == "printsubindex") {
+ if (t.cs() == "printindex" || t.cs() == "printsubindex") {
context.check_layout(os);
string commandname = t.cs();
bool star = false;
os << "type \"idx\"\n";
else
os << "type \"" << indexname << "\"\n";
+ os << "literal \"true\"\n";
}
end_inset(os);
skip_spaces_braces(p);
preamble.registerAutomaticallyLoadedPackage("makeidx");
if (preamble.use_indices() == "true")
preamble.registerAutomaticallyLoadedPackage("splitidx");
+ continue;
}
- else if (t.cs() == "printnomenclature") {
+ if (t.cs() == "printnomenclature") {
string width = "";
string width_type = "";
context.check_layout(os);
end_inset(os);
skip_spaces_braces(p);
preamble.registerAutomaticallyLoadedPackage("nomencl");
+ continue;
}
- else if ((t.cs() == "textsuperscript" || t.cs() == "textsubscript")) {
+ if ((t.cs() == "textsuperscript" || t.cs() == "textsubscript")) {
context.check_layout(os);
begin_inset(os, "script ");
os << t.cs().substr(4) << '\n';
end_inset(os);
if (t.cs() == "textsubscript")
preamble.registerAutomaticallyLoadedPackage("subscript");
+ continue;
}
- else if ((where = is_known(t.cs(), known_quotes))) {
+ if ((where = is_known(t.cs(), known_quotes))) {
context.check_layout(os);
begin_inset(os, "Quotes ");
os << known_coded_quotes[where - known_quotes];
// {} pair.
eat_whitespace(p, os, context, false);
skip_braces(p);
+ continue;
}
- else if ((where = is_known(t.cs(), known_sizes)) &&
+ if ((where = is_known(t.cs(), known_sizes)) &&
context.new_layout_allowed) {
context.check_layout(os);
TeXFont const oldFont = context.font;
context.font.size = known_coded_sizes[where - known_sizes];
output_font_change(os, oldFont, context.font);
eat_whitespace(p, os, context, false);
+ continue;
}
- else if ((where = is_known(t.cs(), known_font_families)) &&
+ if ((where = is_known(t.cs(), known_font_families)) &&
context.new_layout_allowed) {
context.check_layout(os);
TeXFont const oldFont = context.font;
known_coded_font_families[where - known_font_families];
output_font_change(os, oldFont, context.font);
eat_whitespace(p, os, context, false);
+ continue;
}
- else if ((where = is_known(t.cs(), known_font_series)) &&
+ if ((where = is_known(t.cs(), known_font_series)) &&
context.new_layout_allowed) {
context.check_layout(os);
TeXFont const oldFont = context.font;
known_coded_font_series[where - known_font_series];
output_font_change(os, oldFont, context.font);
eat_whitespace(p, os, context, false);
+ continue;
}
- else if ((where = is_known(t.cs(), known_font_shapes)) &&
+ if ((where = is_known(t.cs(), known_font_shapes)) &&
context.new_layout_allowed) {
context.check_layout(os);
TeXFont const oldFont = context.font;
known_coded_font_shapes[where - known_font_shapes];
output_font_change(os, oldFont, context.font);
eat_whitespace(p, os, context, false);
+ continue;
}
- else if ((where = is_known(t.cs(), known_old_font_families)) &&
+ if ((where = is_known(t.cs(), known_old_font_families)) &&
context.new_layout_allowed) {
context.check_layout(os);
TeXFont const oldFont = context.font;
known_coded_font_families[where - known_old_font_families];
output_font_change(os, oldFont, context.font);
eat_whitespace(p, os, context, false);
+ continue;
}
- else if ((where = is_known(t.cs(), known_old_font_series)) &&
+ if ((where = is_known(t.cs(), known_old_font_series)) &&
context.new_layout_allowed) {
context.check_layout(os);
TeXFont const oldFont = context.font;
known_coded_font_series[where - known_old_font_series];
output_font_change(os, oldFont, context.font);
eat_whitespace(p, os, context, false);
+ continue;
}
- else if ((where = is_known(t.cs(), known_old_font_shapes)) &&
+ if ((where = is_known(t.cs(), known_old_font_shapes)) &&
context.new_layout_allowed) {
context.check_layout(os);
TeXFont const oldFont = context.font;
known_coded_font_shapes[where - known_old_font_shapes];
output_font_change(os, oldFont, context.font);
eat_whitespace(p, os, context, false);
+ continue;
}
- else if (t.cs() == "selectlanguage") {
+ if (t.cs() == "selectlanguage") {
context.check_layout(os);
// save the language for the case that a
// \foreignlanguage is used
context.font.language = babel2lyx(p.verbatim_item());
os << "\n\\lang " << context.font.language << "\n";
+ continue;
}
- else if (t.cs() == "foreignlanguage") {
+ if (t.cs() == "foreignlanguage") {
string const lang = babel2lyx(p.verbatim_item());
parse_text_attributes(p, os, FLAG_ITEM, outer,
context, "\\lang",
context.font.language, lang);
+ continue;
}
- else if (prefixIs(t.cs(), "text") && preamble.usePolyglossia()
+ if (prefixIs(t.cs(), "text") && preamble.usePolyglossia()
&& is_known(t.cs().substr(4), preamble.polyglossia_languages)) {
// scheme is \textLANGUAGE{text} where LANGUAGE is in polyglossia_languages[]
string lang;
context, "\\lang",
context.font.language, lang);
}
+ continue;
}
- else if (t.cs() == "inputencoding") {
+ if (t.cs() == "inputencoding") {
// nothing to write here
string const enc = subst(p.verbatim_item(), "\n", " ");
p.setEncoding(enc, Encoding::inputenc);
+ continue;
}
- else if (is_known(t.cs(), known_special_chars) ||
- (t.cs() == "protect" &&
- p.next_token().cat() == catEscape &&
- is_known(p.next_token().cs(), known_special_protect_chars))) {
+ if (is_known(t.cs(), known_special_chars) ||
+ (t.cs() == "protect" &&
+ p.next_token().cat() == catEscape &&
+ is_known(p.next_token().cs(), known_special_protect_chars))) {
// LyX sometimes puts a \protect in front, so we have to ignore it
where = is_known(
t.cs() == "protect" ? p.get_token().cs() : t.cs(),
context.check_layout(os);
os << known_coded_special_chars[where - known_special_chars];
skip_spaces_braces(p);
+ continue;
}
- else if ((t.cs() == "nobreakdash" && p.next_token().asInput() == "-") ||
+ if ((t.cs() == "nobreakdash" && p.next_token().asInput() == "-") ||
(t.cs() == "protect" && p.next_token().asInput() == "\\nobreakdash" &&
p.next_next_token().asInput() == "-") ||
(t.cs() == "@" && p.next_token().asInput() == ".")) {
else
os << "\\SpecialChar endofsentence\n";
p.get_token();
+ continue;
}
- else if (t.cs() == "textquotedbl") {
+ if (t.cs() == "textquotedbl") {
context.check_layout(os);
os << "\"";
skip_braces(p);
+ continue;
}
- else if (t.cs() == "_" || t.cs() == "&" || t.cs() == "#"
+ if (t.cs() == "_" || t.cs() == "&" || t.cs() == "#"
|| t.cs() == "$" || t.cs() == "{" || t.cs() == "}"
|| t.cs() == "%" || t.cs() == "-") {
context.check_layout(os);
os << "\\SpecialChar softhyphen\n";
else
os << t.cs();
+ continue;
}
- else if (t.cs() == "char") {
+ if (t.cs() == "char") {
context.check_layout(os);
if (p.next_token().character() == '`') {
p.get_token();
} else {
output_ert_inset(os, "\\char", context);
}
+ continue;
}
- else if (t.cs() == "verb") {
+ if (t.cs() == "verb") {
context.check_layout(os);
// set catcodes to verbatim early, just in case.
p.setCatcodes(VERBATIM_CATCODES);
+ arg.second + delim, context);
else
cerr << "invalid \\verb command. Skipping" << endl;
+ continue;
}
// Problem: \= creates a tabstop inside the tabbing environment
// and else an accent. In the latter case we really would want
// \={o} instead of \= o.
- else if (t.cs() == "=" && (flags & FLAG_TABBING))
+ if (t.cs() == "=" && (flags & FLAG_TABBING)) {
output_ert_inset(os, t.asInput(), context);
+ continue;
+ }
- else if (t.cs() == "\\") {
+ if (t.cs() == "\\") {
context.check_layout(os);
if (p.hasOpt())
output_ert_inset(os, "\\\\" + p.getOpt(), context);
begin_inset(os, "Newline newline");
end_inset(os);
}
+ continue;
}
- else if (t.cs() == "newline" ||
- (t.cs() == "linebreak" && !p.hasOpt())) {
+ if (t.cs() == "newline" ||
+ (t.cs() == "linebreak" && !p.hasOpt())) {
context.check_layout(os);
begin_inset(os, "Newline ");
os << t.cs();
end_inset(os);
skip_spaces_braces(p);
+ continue;
}
- else if (t.cs() == "input" || t.cs() == "include"
- || t.cs() == "verbatiminput") {
+ if (t.cs() == "input" || t.cs() == "include"
+ || t.cs() == "verbatiminput") {
string name = t.cs();
if (t.cs() == "verbatiminput"
&& p.next_token().asInput() == "*")
preamble.registerAutomaticallyLoadedPackage("verbatim");
}
end_inset(os);
+ continue;
}
- else if (t.cs() == "bibliographystyle") {
+ if (t.cs() == "bibliographystyle") {
// store new bibliographystyle
bibliographystyle = p.verbatim_item();
// If any other command than \bibliography, \addcontentsline
"\\bibliographystyle{" + bibliographystyle + '}',
context);
}
+ continue;
}
- else if (t.cs() == "phantomsection") {
+ if (t.cs() == "phantomsection") {
// we only support this if it occurs between
// \bibliographystyle and \bibliography
if (bibliographystyle.empty())
output_ert_inset(os, "\\phantomsection", context);
+ continue;
}
- else if (t.cs() == "addcontentsline") {
+ if (t.cs() == "addcontentsline") {
context.check_layout(os);
// get the 3 arguments of \addcontentsline
string const one = p.getArg('{', '}');
"\\addcontentsline{" + one + "}{" + two + "}{"+ three + '}',
context);
}
+ continue;
}
else if (t.cs() == "bibliography") {
// \nocite{*} option
btprint.clear();
}
- os << "bibfiles " << '"' << p.verbatim_item() << '"' << "\n";
+ os << "bibfiles " << '"' << normalize_filename(p.verbatim_item()) << '"' << "\n";
// Do we have addcontentsline?
if (contentslineContent == "\\refname") {
BibOpts = "bibtotoc";
// Do we have a bibliographystyle set?
if (!bibliographystyle.empty()) {
if (BibOpts.empty())
- BibOpts = bibliographystyle;
+ BibOpts = normalize_filename(bibliographystyle);
else
- BibOpts = BibOpts + ',' + bibliographystyle;
+ BibOpts = BibOpts + ',' + normalize_filename(bibliographystyle);
// clear it because each bibtex entry has its style
// and we need an empty string to handle \phantomsection
bibliographystyle.clear();
}
os << "options " << '"' << BibOpts << '"' << "\n";
end_inset(os);
+ continue;
}
- else if (t.cs() == "parbox") {
+ if (t.cs() == "printbibliography") {
+ context.check_layout(os);
+ string BibOpts;
+ string bbloptions = p.hasOpt() ? p.getArg('[', ']') : string();
+ vector<string> opts = getVectorFromString(bbloptions);
+ vector<string>::iterator it =
+ find(opts.begin(), opts.end(), "heading=bibintoc");
+ if (it != opts.end()) {
+ opts.erase(it);
+ BibOpts = "bibtotoc";
+ }
+ bbloptions = getStringFromVector(opts);
+ begin_command_inset(os, "bibtex", "bibtex");
+ if (!btprint.empty()) {
+ os << "btprint " << '"' << "btPrintAll" << '"' << "\n";
+ // clear the string because the next BibTeX inset can be without the
+ // \nocite{*} option
+ btprint.clear();
+ }
+ string bibfiles;
+ for (auto const & bf : preamble.biblatex_bibliographies) {
+ if (!bibfiles.empty())
+ bibfiles += ",";
+ bibfiles += normalize_filename(bf);
+ }
+ if (!bibfiles.empty())
+ os << "bibfiles " << '"' << bibfiles << '"' << "\n";
+ // Do we have addcontentsline?
+ if (contentslineContent == "\\refname") {
+ BibOpts = "bibtotoc";
+ // clear string because next BibTeX inset can be without addcontentsline
+ contentslineContent.clear();
+ }
+ os << "options " << '"' << BibOpts << '"' << "\n";
+ if (!bbloptions.empty())
+ os << "biblatexopts " << '"' << bbloptions << '"' << "\n";
+ end_inset(os);
+ need_commentbib = false;
+ continue;
+ }
+
+ if (t.cs() == "bibbysection") {
+ context.check_layout(os);
+ string BibOpts;
+ string bbloptions = p.hasOpt() ? p.getArg('[', ']') : string();
+ vector<string> opts = getVectorFromString(bbloptions);
+ vector<string>::iterator it =
+ find(opts.begin(), opts.end(), "heading=bibintoc");
+ if (it != opts.end()) {
+ opts.erase(it);
+ BibOpts = "bibtotoc";
+ }
+ bbloptions = getStringFromVector(opts);
+ begin_command_inset(os, "bibtex", "bibtex");
+ os << "btprint " << '"' << "bibbysection" << '"' << "\n";
+ string bibfiles;
+ for (auto const & bf : preamble.biblatex_bibliographies) {
+ if (!bibfiles.empty())
+ bibfiles += ",";
+ bibfiles += normalize_filename(bf);
+ }
+ if (!bibfiles.empty())
+ os << "bibfiles " << '"' << bibfiles << '"' << "\n";
+ os << "options " << '"' << BibOpts << '"' << "\n";
+ if (!bbloptions.empty())
+ os << "biblatexopts " << '"' << bbloptions << '"' << "\n";
+ end_inset(os);
+ need_commentbib = false;
+ continue;
+ }
+
+ if (t.cs() == "parbox") {
// Test whether this is an outer box of a shaded box
p.pushPosition();
// swallow arguments
} else
parse_box(p, os, 0, FLAG_ITEM, outer, context,
"", "", t.cs(), "", "");
+ continue;
}
- else if (t.cs() == "fbox" || t.cs() == "mbox" ||
- t.cs() == "ovalbox" || t.cs() == "Ovalbox" ||
- t.cs() == "shadowbox" || t.cs() == "doublebox")
+ if (t.cs() == "fbox" || t.cs() == "mbox" ||
+ t.cs() == "ovalbox" || t.cs() == "Ovalbox" ||
+ t.cs() == "shadowbox" || t.cs() == "doublebox") {
parse_outer_box(p, os, FLAG_ITEM, outer, context, t.cs(), "");
+ continue;
+ }
- else if (t.cs() == "fcolorbox" || t.cs() == "colorbox") {
+ if (t.cs() == "fcolorbox" || t.cs() == "colorbox") {
string backgroundcolor;
preamble.registerAutomaticallyLoadedPackage("xcolor");
if (t.cs() == "fcolorbox") {
backgroundcolor = p.getArg('{', '}');
parse_box(p, os, 0, 0, outer, context, "", "", "", "", backgroundcolor);
}
+ continue;
}
// FIXME: due to the compiler limit of "if" nestings
// the code for the alignment was put here
// put them in their own if if this is fixed
- else if (t.cs() == "fboxrule" || t.cs() == "fboxsep"
- || t.cs() == "shadowsize"
- || t.cs() == "raggedleft" || t.cs() == "centering"
- || t.cs() == "raggedright") {
- p.skip_spaces(true);
+ if (t.cs() == "fboxrule" || t.cs() == "fboxsep"
+ || t.cs() == "shadowsize"
+ || t.cs() == "raggedleft" || t.cs() == "centering"
+ || t.cs() == "raggedright") {
if (t.cs() == "fboxrule")
fboxrule = "";
if (t.cs() == "fboxsep")
shadow_size = "";
if (t.cs() != "raggedleft" && t.cs() != "centering"
&& t.cs() != "raggedright") {
+ p.skip_spaces(true);
while (p.good() && p.next_token().cat() != catSpace
&& p.next_token().cat() != catNewline
&& p.next_token().cat() != catEscape) {
shadow_size = shadow_size + p.get_token().asInput();
}
} else {
- // we only handle them if they are in a box
- if (!wasBoxAlign)
- output_ert_inset(os, '\\' + t.cs() + ' ', context);
+ output_ert_inset(os, t.asInput(), context);
}
- wasBoxAlign = false;
+ continue;
}
//\framebox() is part of the picture environment and different from \framebox{}
//\framebox{} will be parsed by parse_outer_box
- else if (t.cs() == "framebox") {
+ if (t.cs() == "framebox") {
if (p.next_token().character() == '(') {
//the syntax is: \framebox(x,y)[position]{content}
string arg = t.asInput();
parse_outer_box(p, os, FLAG_ITEM, outer,
context, t.cs(), special);
}
+ continue;
}
//\makebox() is part of the picture environment and different from \makebox{}
//\makebox{} will be parsed by parse_box
- else if (t.cs() == "makebox") {
+ if (t.cs() == "makebox") {
if (p.next_token().character() == '(') {
//the syntax is: \makebox(x,y)[position]{content}
string arg = t.asInput();
//the syntax is: \makebox[width][position]{content}
parse_box(p, os, 0, FLAG_ITEM, outer, context,
"", "", t.cs(), "", "");
+ continue;
}
- else if (t.cs() == "smallskip" ||
- t.cs() == "medskip" ||
- t.cs() == "bigskip" ||
- t.cs() == "vfill") {
+ if (t.cs() == "smallskip" ||
+ t.cs() == "medskip" ||
+ t.cs() == "bigskip" ||
+ t.cs() == "vfill") {
context.check_layout(os);
begin_inset(os, "VSpace ");
os << t.cs();
end_inset(os);
skip_spaces_braces(p);
+ continue;
}
- else if ((where = is_known(t.cs(), known_spaces))) {
+ if ((where = is_known(t.cs(), known_spaces))) {
context.check_layout(os);
begin_inset(os, "space ");
os << '\\' << known_coded_spaces[where - known_spaces]
// remove the braces after "\\,", too.
if (t.cs() != " ")
skip_braces(p);
+ continue;
}
- else if (t.cs() == "newpage" ||
- (t.cs() == "pagebreak" && !p.hasOpt()) ||
- t.cs() == "clearpage" ||
- t.cs() == "cleardoublepage") {
+ if (t.cs() == "newpage" ||
+ (t.cs() == "pagebreak" && !p.hasOpt()) ||
+ t.cs() == "clearpage" ||
+ t.cs() == "cleardoublepage") {
context.check_layout(os);
begin_inset(os, "Newpage ");
os << t.cs();
end_inset(os);
skip_spaces_braces(p);
+ continue;
}
- else if (t.cs() == "DeclareRobustCommand" ||
+ if (t.cs() == "DeclareRobustCommand" ||
t.cs() == "DeclareRobustCommandx" ||
t.cs() == "newcommand" ||
t.cs() == "newcommandx" ||
os << "\n" << ert;
end_inset(os);
}
+ continue;
}
- else if (t.cs() == "let" && p.next_token().asInput() != "*") {
+ if (t.cs() == "let" && p.next_token().asInput() != "*") {
// let could be handled by parse_command(),
// but we need to call add_known_command() here.
string ert = t.asInput();
if (it != known_commands.end())
known_commands[t.asInput()] = it->second;
output_ert_inset(os, ert, context);
+ continue;
}
- else if (t.cs() == "hspace" || t.cs() == "vspace") {
+ if (t.cs() == "hspace" || t.cs() == "vspace") {
if (starred)
p.get_token();
string name = t.asInput();
// therefore handle them separately
if (unit == "\\paperwidth" || unit == "\\columnwidth"
|| unit == "\\textwidth" || unit == "\\linewidth"
- || unit == "\\textheight" || unit == "\\paperheight")
+ || unit == "\\textheight" || unit == "\\paperheight"
+ || unit == "\\baselineskip")
known_unit = true;
break;
}
} else
output_ert_inset(os, name + '{' + length + '}', context);
}
+ continue;
}
// The single '=' is meant here.
- else if ((newinsetlayout = findInsetLayout(context.textclass, starredname, true))) {
+ if ((newinsetlayout = findInsetLayout(context.textclass, starredname, true))) {
if (starred)
p.get_token();
p.skip_spaces();
docstring const name = newinsetlayout->name();
bool const caption = name.find(from_ascii("Caption:")) == 0;
if (caption) {
- begin_inset(os, "Caption ");
- os << to_utf8(name.substr(8)) << '\n';
+ // Already done for floating minted listings.
+ if (minted_float.empty()) {
+ begin_inset(os, "Caption ");
+ os << to_utf8(name.substr(8)) << '\n';
+ }
} else {
begin_inset(os, "Flex ");
os << to_utf8(name) << '\n'
<< "status collapsed\n";
}
- if (newinsetlayout->isPassThru()) {
+ if (!minted_float.empty()) {
+ parse_text_snippet(p, os, FLAG_ITEM, false, context);
+ } else if (newinsetlayout->isPassThru()) {
// set catcodes to verbatim early, just in case.
p.setCatcodes(VERBATIM_CATCODES);
string delim = p.get_token().asInput();
parse_text_in_inset(p, os, FLAG_ITEM, false, context, newinsetlayout);
if (caption)
p.skip_spaces();
- end_inset(os);
+ // Minted caption insets are not closed here because
+ // we collect everything into the caption.
+ if (minted_float.empty())
+ end_inset(os);
+ continue;
}
- else if (t.cs() == "includepdf") {
+ if (t.cs() == "includepdf") {
p.skip_spaces();
string const arg = p.getArg('[', ']');
map<string, string> opts;
end_inset(os);
context.check_layout(os);
registerExternalTemplatePackages("PDFPages");
+ continue;
}
- else if (t.cs() == "loadgame") {
+ if (t.cs() == "loadgame") {
p.skip_spaces();
string name = normalize_filename(p.verbatim_item());
string const path = getMasterFilePath(true);
if (p.get_token().asInput() == "showboard")
p.get_token();
registerExternalTemplatePackages("ChessDiagram");
+ continue;
}
- else {
- // try to see whether the string is in unicodesymbols
- // Only use text mode commands, since we are in text mode here,
- // and math commands may be invalid (bug 6797)
- string name = t.asInput();
- // handle the dingbats, cyrillic and greek
- if (name == "\\ding" || name == "\\textcyr" ||
- (name == "\\textgreek" && !preamble.usePolyglossia()))
- name = name + '{' + p.getArg('{', '}') + '}';
- // handle the ifsym characters
- else if (name == "\\textifsymbol") {
- string const optif = p.getFullOpt();
- string const argif = p.getArg('{', '}');
- name = name + optif + '{' + argif + '}';
- }
- // handle the \ascii characters
- // the case of \ascii within braces, as LyX outputs it, is already
- // handled for t.cat() == catBegin
- else if (name == "\\ascii") {
- // the code is "\asci\xxx"
- name = "{" + name + p.get_token().asInput() + "}";
+ // try to see whether the string is in unicodesymbols
+ // Only use text mode commands, since we are in text mode here,
+ // and math commands may be invalid (bug 6797)
+ string name = t.asInput();
+ // handle the dingbats, cyrillic and greek
+ if (name == "\\ding" || name == "\\textcyr" ||
+ (name == "\\textgreek" && !preamble.usePolyglossia()))
+ name = name + '{' + p.getArg('{', '}') + '}';
+ // handle the ifsym characters
+ else if (name == "\\textifsymbol") {
+ string const optif = p.getFullOpt();
+ string const argif = p.getArg('{', '}');
+ name = name + optif + '{' + argif + '}';
+ }
+ // handle the \ascii characters
+ // the case of \ascii within braces, as LyX outputs it, is already
+ // handled for t.cat() == catBegin
+ else if (name == "\\ascii") {
+ // the code is "\asci\xxx"
+ name = "{" + name + p.get_token().asInput() + "}";
+ skip_braces(p);
+ }
+ // handle some TIPA special characters
+ else if (preamble.isPackageUsed("tipa")) {
+ if (name == "\\s") {
+ // fromLaTeXCommand() does not yet
+ // recognize tipa short cuts
+ name = "\\textsyllabic";
+ } else if (name == "\\=" &&
+ p.next_token().asInput() == "*") {
+ // fromLaTeXCommand() does not yet
+ // recognize tipa short cuts
+ p.get_token();
+ name = "\\textsubbar";
+ } else if (name == "\\textdoublevertline") {
+ // FIXME: This is not correct,
+ // \textvertline is higher than \textbardbl
+ name = "\\textbardbl";
skip_braces(p);
- }
- // handle some TIPA special characters
- else if (preamble.isPackageUsed("tipa")) {
- if (name == "\\textglobfall") {
- name = "End";
+ } else if (name == "\\!" ) {
+ if (p.next_token().asInput() == "b") {
+ p.get_token(); // eat 'b'
+ name = "\\texthtb";
skip_braces(p);
- } else if (name == "\\s") {
- // fromLaTeXCommand() does not yet
- // recognize tipa short cuts
- name = "\\textsyllabic";
- } else if (name == "\\=" &&
- p.next_token().asInput() == "*") {
- // fromLaTeXCommand() does not yet
- // recognize tipa short cuts
+ } else if (p.next_token().asInput() == "d") {
p.get_token();
- name = "\\b";
- } else if (name == "\\textdoublevertline") {
- // FIXME: This is not correct,
- // \textvertline is higher than \textbardbl
- name = "\\textbardbl";
+ name = "\\texthtd";
+ skip_braces(p);
+ } else if (p.next_token().asInput() == "g") {
+ p.get_token();
+ name = "\\texthtg";
+ skip_braces(p);
+ } else if (p.next_token().asInput() == "G") {
+ p.get_token();
+ name = "\\texthtscg";
+ skip_braces(p);
+ } else if (p.next_token().asInput() == "j") {
+ p.get_token();
+ name = "\\texthtbardotlessj";
+ skip_braces(p);
+ } else if (p.next_token().asInput() == "o") {
+ p.get_token();
+ name = "\\textbullseye";
skip_braces(p);
- } else if (name == "\\!" ) {
- if (p.next_token().asInput() == "b") {
- p.get_token(); // eat 'b'
- name = "\\texthtb";
- skip_braces(p);
- } else if (p.next_token().asInput() == "d") {
- p.get_token();
- name = "\\texthtd";
- skip_braces(p);
- } else if (p.next_token().asInput() == "g") {
- p.get_token();
- name = "\\texthtg";
- skip_braces(p);
- } else if (p.next_token().asInput() == "G") {
- p.get_token();
- name = "\\texthtscg";
- skip_braces(p);
- } else if (p.next_token().asInput() == "j") {
- p.get_token();
- name = "\\texthtbardotlessj";
- skip_braces(p);
- } else if (p.next_token().asInput() == "o") {
- p.get_token();
- name = "\\textbullseye";
- skip_braces(p);
- }
- } else if (name == "\\*" ) {
- if (p.next_token().asInput() == "k") {
- p.get_token();
- name = "\\textturnk";
- skip_braces(p);
- } else if (p.next_token().asInput() == "r") {
- p.get_token(); // eat 'b'
- name = "\\textturnr";
- skip_braces(p);
- } else if (p.next_token().asInput() == "t") {
- p.get_token();
- name = "\\textturnt";
- skip_braces(p);
- } else if (p.next_token().asInput() == "w") {
- p.get_token();
- name = "\\textturnw";
- skip_braces(p);
- }
}
- }
- if ((name.size() == 2 &&
- contains("\"'.=^`bcdHkrtuv~", name[1]) &&
- p.next_token().asInput() != "*") ||
- is_known(name.substr(1), known_tipa_marks)) {
- // name is a command that corresponds to a
- // combining character in unicodesymbols.
- // Append the argument, fromLaTeXCommand()
- // will either convert it to a single
- // character or a combining sequence.
- name += '{' + p.verbatim_item() + '}';
- }
- // now get the character from unicodesymbols
- bool termination;
- docstring rem;
- set<string> req;
- docstring s = encodings.fromLaTeXCommand(from_utf8(name),
- Encodings::TEXT_CMD, termination, rem, &req);
- if (!s.empty()) {
- context.check_layout(os);
- os << to_utf8(s);
- if (!rem.empty())
- output_ert_inset(os, to_utf8(rem), context);
- if (termination)
- skip_spaces_braces(p);
- for (set<string>::const_iterator it = req.begin(); it != req.end(); ++it)
- preamble.registerAutomaticallyLoadedPackage(*it);
- }
- //cerr << "#: " << t << " mode: " << mode << endl;
- // heuristic: read up to next non-nested space
- /*
- string s = t.asInput();
- string z = p.verbatim_item();
- while (p.good() && z != " " && !z.empty()) {
- //cerr << "read: " << z << endl;
- s += z;
- z = p.verbatim_item();
- }
- cerr << "found ERT: " << s << endl;
- output_ert_inset(os, s + ' ', context);
- */
- else {
- if (t.asInput() == name &&
- p.next_token().asInput() == "*") {
- // Starred commands like \vspace*{}
- p.get_token(); // Eat '*'
- name += '*';
+ } else if (name == "\\*" ) {
+ if (p.next_token().asInput() == "k") {
+ p.get_token();
+ name = "\\textturnk";
+ skip_braces(p);
+ } else if (p.next_token().asInput() == "r") {
+ p.get_token(); // eat 'b'
+ name = "\\textturnr";
+ skip_braces(p);
+ } else if (p.next_token().asInput() == "t") {
+ p.get_token();
+ name = "\\textturnt";
+ skip_braces(p);
+ } else if (p.next_token().asInput() == "w") {
+ p.get_token();
+ name = "\\textturnw";
+ skip_braces(p);
}
- if (!parse_command(name, p, os, outer, context))
- output_ert_inset(os, name, context);
}
}
-
- if (flags & FLAG_LEAVE) {
- flags &= ~FLAG_LEAVE;
- break;
+ if ((name.size() == 2 &&
+ contains("\"'.=^`bcdHkrtuv~", name[1]) &&
+ p.next_token().asInput() != "*") ||
+ is_known(name.substr(1), known_tipa_marks)) {
+ // name is a command that corresponds to a
+ // combining character in unicodesymbols.
+ // Append the argument, fromLaTeXCommand()
+ // will either convert it to a single
+ // character or a combining sequence.
+ name += '{' + p.verbatim_item() + '}';
+ }
+ // now get the character from unicodesymbols
+ bool termination;
+ docstring rem;
+ set<string> req;
+ docstring s = normalize_c(encodings.fromLaTeXCommand(from_utf8(name),
+ Encodings::TEXT_CMD, termination, rem, &req));
+ if (!s.empty()) {
+ context.check_layout(os);
+ os << to_utf8(s);
+ if (!rem.empty())
+ output_ert_inset(os, to_utf8(rem), context);
+ if (termination)
+ skip_spaces_braces(p);
+ for (set<string>::const_iterator it = req.begin(); it != req.end(); ++it)
+ preamble.registerAutomaticallyLoadedPackage(*it);
+ }
+ //cerr << "#: " << t << " mode: " << mode << endl;
+ // heuristic: read up to next non-nested space
+ /*
+ string s = t.asInput();
+ string z = p.verbatim_item();
+ while (p.good() && z != " " && !z.empty()) {
+ //cerr << "read: " << z << endl;
+ s += z;
+ z = p.verbatim_item();
+ }
+ cerr << "found ERT: " << s << endl;
+ output_ert_inset(os, s + ' ', context);
+ */
+ else {
+ if (t.asInput() == name &&
+ p.next_token().asInput() == "*") {
+ // Starred commands like \vspace*{}
+ p.get_token(); // Eat '*'
+ name += '*';
+ }
+ if (!parse_command(name, p, os, outer, context))
+ output_ert_inset(os, name, context);
}
}
}
return use->first;
}
+
+// If the preamble declared biblatex bibliography databases but the body
+// contained no \printbibliography or \bibbysection command (which is valid
+// LaTeX), emit a BibTeX inset wrapped in a LyX note so the database
+// information is not lost on conversion.  No-op unless need_commentbib
+// is still set (the \printbibliography/\bibbysection handlers clear it).
+void check_comment_bib(ostream & os, Context & context)
+{
+	if (!need_commentbib)
+		return;
+	// We have a bibliography database, but no bibliography with biblatex
+	// which is completely valid. Insert a bibtex inset in a note.
+	context.check_layout(os);
+	begin_inset(os, "Note Note\n");
+	os << "status open\n";
+	os << "\\begin_layout Plain Layout\n";
+	begin_command_inset(os, "bibtex", "bibtex");
+	// Join all declared databases into one comma-separated "bibfiles" list,
+	// normalizing each file name to LyX's internal path form.
+	string bibfiles;
+	for (auto const & bf : preamble.biblatex_bibliographies) {
+		if (!bibfiles.empty())
+			bibfiles += ",";
+		bibfiles += normalize_filename(bf);
+	}
+	if (!bibfiles.empty())
+		os << "bibfiles " << '"' << bibfiles << '"' << "\n";
+	end_inset(os);// Bibtex
+	os << "\\end_layout\n";
+	end_inset(os);// Note
+}
+
// }])