2 * \file tex2lyx/text.cpp
3 * This file is part of LyX, the document processor.
4 * Licence details can be found in the file COPYING.
7 * \author Jean-Marc Lasgouttes
10 * Full author contact details are available in file CREDITS.
21 #include "FloatList.h"
22 #include "LaTeXPackages.h"
27 #include "insets/ExternalTemplate.h"
29 #include "support/lassert.h"
30 #include "support/convert.h"
31 #include "support/FileName.h"
32 #include "support/filetools.h"
33 #include "support/lstrings.h"
34 #include "support/lyxtime.h"
43 using namespace lyx::support;
// Forward declaration; the definition appears further below.
50 void output_arguments(ostream &, Parser &, bool, bool, string, Context &,
51 Layout::LaTeXArgMap const &);
/// Parse LaTeX text into a LyX inset that is governed by the given
/// InsetLayout (may be null if no layout is known for this inset).
/// NOTE(review): extraction gaps — some lines of this body are missing here.
56 void parse_text_in_inset(Parser & p, ostream & os, unsigned flags, bool outer,
57 Context const & context, InsetLayout const * layout,
// Insets whose layout forces a plain layout start from the text class'
// plain layout instead of inheriting the surrounding one.
60 bool const forcePlainLayout =
61 layout ? layout->forcePlainLayout() : false;
62 Context newcontext(true, context.textclass)
64 newcontext.layout = &context.textclass.plainLayout();
// The font, however, is inherited from the surrounding context.
66 newcontext.font = context.font;
68 output_arguments(os, p, outer, false, string(), newcontext,
70 // If we have a latex param, we eat it here.
71 if (!context.latexparam.empty()) {
// The parameter is parsed into a throw-away context; its closing
// delimiter is the last character of latexparam.
73 Context dummy(true, context.textclass);
74 parse_text(p, oss, FLAG_RDELIM, outer, dummy,
75 string(1, context.latexparam.back()));
77 parse_text(p, os, flags, outer, newcontext, rdelim);
// Arguments that come after the command body ("post" arguments).
79 output_arguments(os, p, outer, false, "post", newcontext,
80 layout->postcommandargs());
81 newcontext.check_end_layout(os);
/// Convenience overload: look up the InsetLayout called \p name in the
/// text class and parse the inset contents with it (null layout if the
/// name is unknown).
87 void parse_text_in_inset(Parser & p, ostream & os, unsigned flags, bool outer,
88 Context const & context, string const & name,
89 string const rdelim = string())
91 InsetLayout const * layout = 0;
92 DocumentClass::InsetLayouts::const_iterator it =
93 context.textclass.insetLayouts().find(from_ascii(name));
94 if (it != context.textclass.insetLayouts().end())
95 layout = &(it->second);
// Delegate to the layout-based overload above.
96 parse_text_in_inset(p, os, flags, outer, context, layout, rdelim);
99 /// parses a paragraph snippet, useful for example for \\emph{...}
100 void parse_text_snippet(Parser & p, ostream & os, unsigned flags, bool outer,
// Work on a copy of the context so that most state changes stay local.
103 Context newcontext(context);
104 // Don't inherit the paragraph-level extra stuff
105 newcontext.par_extra_stuff.clear();
106 parse_text(p, os, flags, outer, newcontext);
107 // Make sure that we don't create invalid .lyx files
// Propagate the layout bookkeeping back so the caller knows whether a
// \begin_layout / \end_layout is still pending.
108 context.need_layout = newcontext.need_layout;
109 context.need_end_layout = newcontext.need_end_layout;
114 * Thin wrapper around parse_text_snippet() using a string.
116 * We completely ignore \c context.need_layout and \c context.need_end_layout,
117 * because our return value is not used directly (otherwise the stream version
118 * of parse_text_snippet() could be used). That means that the caller needs
119 * to do layout management manually.
120 * This is intended to parse text that does not create any layout changes.
122 string parse_text_snippet(Parser & p, unsigned flags, const bool outer,
// Copy the context and disable all layout handling: the produced string
// is used as-is by the caller.
125 Context newcontext(context);
126 newcontext.need_layout = false;
127 newcontext.need_end_layout = false;
128 newcontext.new_layout_allowed = false;
129 // Avoid warning by Context::~Context()
130 newcontext.par_extra_stuff.clear();
// Delegate to the stream-based variant (os collects the result).
132 parse_text_snippet(p, os, flags, outer, newcontext);
// Current \fboxrule value (frame thickness) seen in the input; empty
// means the LaTeX default is in effect.
136 string fboxrule = "";
// Current shadow size for shadow boxes; empty means the default.
138 string shadow_size = "";
/// LaTeX cross-referencing commands
140 char const * const known_ref_commands[] = { "ref", "pageref", "vref",
141 "vpageref", "prettyref", "nameref", "eqref", 0 };
/// the same as known_ref_commands with the names LyX uses (same order)
143 char const * const known_coded_ref_commands[] = { "ref", "pageref", "vref",
144 "vpageref", "formatted", "nameref", "eqref", 0 };
/// reference commands of the refstyle package
146 char const * const known_refstyle_commands[] = { "algref", "chapref", "corref",
147 "eqref", "enuref", "figref", "fnref", "lemref", "parref", "partref", "propref",
148 "secref", "subsecref", "tabref", "thmref", 0 };
/// label prefixes matching known_refstyle_commands (same order)
150 char const * const known_refstyle_prefixes[] = { "alg", "chap", "cor",
151 "eq", "enu", "fig", "fn", "lem", "par", "part", "prop",
152 "sec", "subsec", "tab", "thm", 0 };
156 * supported CJK encodings
157 * JIS does not work with LyX's encoding conversion
159 const char * const supported_CJK_encodings[] = {
160 "EUC-JP", "KS", "GB", "UTF8",
161 "Bg5", /*"JIS",*/ "SJIS", 0};
164 * the same as supported_CJK_encodings with their corresponding LyX language name
165 * FIXME: The mapping "UTF8" => "chinese-traditional" is only correct for files
167 * NOTE: "Bg5", "JIS" and "SJIS" are not supported by LyX, on re-export the
168 * encodings "UTF8", "EUC-JP" and "EUC-JP" will be used.
169 * please keep this in sync with supported_CJK_encodings line by line!
171 const char * const supported_CJK_languages[] = {
172 "japanese-cjk", "korean", "chinese-simplified", "chinese-traditional",
173 "chinese-traditional", /*"japanese-cjk",*/ "japanese-cjk", 0};
/// natbib citation commands.
177 * The starred forms are also known except for "citefullauthor",
178 * "citeyear" and "citeyearpar".
180 char const * const known_natbib_commands[] = { "cite", "citet", "citep",
181 "citealt", "citealp", "citeauthor", "citeyear", "citeyearpar",
182 "citefullauthor", "Citet", "Citep", "Citealt", "Citealp", "Citeauthor", 0 };
/// jurabib citation commands.
186 * No starred form other than "cite*" known.
188 char const * const known_jurabib_commands[] = { "cite", "citet", "citep",
189 "citealt", "citealp", "citeauthor", "citeyear", "citeyearpar",
190 // jurabib commands not (yet) supported by LyX:
192 // "footcite", "footcitet", "footcitep", "footcitealt", "footcitealp",
193 // "footciteauthor", "footciteyear", "footciteyearpar",
194 "citefield", "citetitle", 0 };
/// biblatex citation commands.
198 * Known starred forms: \cite*, \citeauthor*, \Citeauthor*, \parencite*, \citetitle*.
200 char const * const known_biblatex_commands[] = { "cite", "Cite", "textcite", "Textcite",
201 "parencite", "Parencite", "citeauthor", "Citeauthor", "citeyear", "smartcite", "Smartcite",
202 "footcite", "Footcite", "autocite", "Autocite", "citetitle", "fullcite", "footfullcite",
203 "supercite", "cites", "Cites", "textcites", "Textcites", "parencites", "Parencites",
204 "smartcites", "Smartcites", "autocites", "Autocites", 0 };
206 // Whether we need to insert a bibtex inset in a comment
207 bool need_commentbib = false;
209 /// LaTeX names for quotes
210 char const * const known_quotes[] = { "dq", "guillemotleft", "flqq", "og",
211 "guillemotright", "frqq", "fg", "glq", "glqq", "textquoteleft", "grq", "grqq",
212 "quotedblbase", "textquotedblleft", "quotesinglbase", "textquoteright", "flq",
213 "guilsinglleft", "frq", "guilsinglright", "textquotedblright", "textquotesingle",
216 /// the same as known_quotes with .lyx names
217 char const * const known_coded_quotes[] = { "qrd", "ard", "ard", "ard",
218 "ald", "ald", "ald", "gls", "gld", "els", "els", "eld",
219 "gld", "eld", "gls", "ers", "ars",
220 "ars", "als", "als", "erd", "qrs", "qrd", 0};
222 /// LaTeX names for font sizes
223 char const * const known_sizes[] = { "tiny", "scriptsize", "footnotesize",
224 "small", "normalsize", "large", "Large", "LARGE", "huge", "Huge", 0};
226 /// the same as known_sizes with .lyx names
227 char const * const known_coded_sizes[] = { "tiny", "scriptsize", "footnotesize",
228 "small", "normal", "large", "larger", "largest", "huge", "giant", 0};
230 /// LaTeX 2.09 names for font families
231 char const * const known_old_font_families[] = { "rm", "sf", "tt", 0};
233 /// LaTeX names for font families
234 char const * const known_font_families[] = { "rmfamily", "sffamily",
237 /// LaTeX names for font family changing commands
238 char const * const known_text_font_families[] = { "textrm", "textsf",
241 /// The same as known_old_font_families, known_font_families and
242 /// known_text_font_families with .lyx names
243 char const * const known_coded_font_families[] = { "roman", "sans",
246 /// LaTeX 2.09 names for font series
247 char const * const known_old_font_series[] = { "bf", 0};
249 /// LaTeX names for font series
250 char const * const known_font_series[] = { "bfseries", "mdseries", 0};
252 /// LaTeX names for font series changing commands
253 char const * const known_text_font_series[] = { "textbf", "textmd", 0};
255 /// The same as known_old_font_series, known_font_series and
256 /// known_text_font_series with .lyx names
257 char const * const known_coded_font_series[] = { "bold", "medium", 0};
259 /// LaTeX 2.09 names for font shapes
260 char const * const known_old_font_shapes[] = { "it", "sl", "sc", 0};
262 /// LaTeX names for font shapes
263 char const * const known_font_shapes[] = { "itshape", "slshape", "scshape",
266 /// LaTeX names for font shape changing commands
267 char const * const known_text_font_shapes[] = { "textit", "textsl", "textsc",
270 /// The same as known_old_font_shapes, known_font_shapes and
271 /// known_text_font_shapes with .lyx names
272 char const * const known_coded_font_shapes[] = { "italic", "slanted",
273 "smallcaps", "up", 0};
275 /// Known special characters which need skip_spaces_braces() afterwards
276 char const * const known_special_chars[] = {"ldots",
277 "lyxarrow", "textcompwordmark",
278 "slash", "textasciitilde", "textasciicircum", "textbackslash",
279 "LyX", "TeX", "LaTeXe",
282 /// special characters from known_special_chars which may have a \\protect before
283 char const * const known_special_protect_chars[] = {"LyX", "TeX",
284 "LaTeXe", "LaTeX", 0};
286 /// the same as known_special_chars with .lyx names
287 char const * const known_coded_special_chars[] = {"\\SpecialChar ldots\n",
288 "\\SpecialChar menuseparator\n", "\\SpecialChar ligaturebreak\n",
289 "\\SpecialChar breakableslash\n", "~", "^", "\n\\backslash\n",
290 "\\SpecialChar LyX\n", "\\SpecialChar TeX\n", "\\SpecialChar LaTeX2e\n",
291 "\\SpecialChar LaTeX\n", 0};
294 * Graphics file extensions known by the dvips driver of the graphics package.
295 * These extensions are used to complete the filename of an included
296 * graphics file if it does not contain an extension.
297 * The order must be the same that latex uses to find a file, because we
298 * will use the first extension that matches.
299 * This is only an approximation for the common cases. If we would want to
300 * do it right in all cases, we would need to know which graphics driver is
301 * used and know the extensions of every driver of the graphics package.
303 char const * const known_dvips_graphics_formats[] = {"eps", "ps", "eps.gz",
304 "ps.gz", "eps.Z", "ps.Z", 0};
307 * Graphics file extensions known by the pdftex driver of the graphics package.
308 * \sa known_dvips_graphics_formats
310 char const * const known_pdftex_graphics_formats[] = {"png", "pdf", "jpg",
314 * Known file extensions for TeX files as used by \\include.
316 char const * const known_tex_extensions[] = {"tex", 0};
318 /// spaces known by InsetSpace
319 char const * const known_spaces[] = { " ", "space", ",",
320 "thinspace", "quad", "qquad", "enspace", "enskip",
321 "negthinspace", "negmedspace", "negthickspace", "textvisiblespace",
322 "hfill", "dotfill", "hrulefill", "leftarrowfill", "rightarrowfill",
323 "upbracefill", "downbracefill", 0};
325 /// the same as known_spaces with .lyx names
326 char const * const known_coded_spaces[] = { "space{}", "space{}",
327 "thinspace{}", "thinspace{}", "quad{}", "qquad{}", "enspace{}", "enskip{}",
328 "negthinspace{}", "negmedspace{}", "negthickspace{}", "textvisiblespace{}",
329 "hfill{}", "dotfill{}", "hrulefill{}", "leftarrowfill{}", "rightarrowfill{}",
330 "upbracefill{}", "downbracefill{}", 0};
332 /// known TIPA combining diacritical marks
333 char const * const known_tipa_marks[] = {"textsubwedge", "textsubumlaut",
334 "textsubtilde", "textseagull", "textsubbridge", "textinvsubbridge",
335 "textsubsquare", "textsubrhalfring", "textsublhalfring", "textsubplus",
336 "textovercross", "textsubarch", "textsuperimposetilde", "textraising",
337 "textlowering", "textadvancing", "textretracting", "textdoublegrave",
338 "texthighrise", "textlowrise", "textrisefall", "textsyllabic",
339 "textsubring", "textsubbar", 0};
341 /// TIPA tones that need special handling
342 char const * const known_tones[] = {"15", "51", "45", "12", "454", 0};
344 // string to store the float type to be able to determine the type of subfloats
345 string float_type = "";
347 // string to store the float status of minted listings
348 string minted_float = "";
350 // whether a caption has been parsed for a floating minted listing
351 bool minted_float_has_caption = false;
353 // The caption for non-floating minted listings
354 string minted_nonfloat_caption = "";
356 // Characters that have to be escaped by \\ in LaTeX
357 char const * const known_escaped_chars[] = {
358 "&", "_", "$", "%", "#", "^", "{", "}", 0};
361 /// splits "x=z, y=b" into a map and an ordered keyword vector
362 void split_map(string const & s, map<string, string> & res, vector<string> & keys)
// One slot per comma-separated "key=value" entry, in input order.
367 keys.resize(v.size());
368 for (size_t i = 0; i < v.size(); ++i) {
// pos is the '=' separator; key is the trimmed text before it, value
// the trimmed text after it.
369 size_t const pos = v[i].find('=');
370 string const index = trimSpaceAndEol(v[i].substr(0, pos));
371 string const value = trimSpaceAndEol(v[i].substr(pos + 1, string::npos));
379 * Split a LaTeX length into value and unit.
380 * The latter can be a real unit like "pt", or a latex length variable
381 * like "\textwidth". The unit may contain additional stuff like glue
382 * lengths, but we don't care, because such lengths are ERT anyway.
383 * \returns true if \p value and \p unit are valid.
385 bool splitLatexLength(string const & len, string & value, string & unit)
// i = first character that cannot belong to the numeric factor.
389 const string::size_type i = len.find_first_not_of(" -+0123456789.,");
390 //'4,5' is a valid LaTeX length number. Change it to '4.5'
391 string const length = subst(len, ',', '.');
// No unit at all => not a valid length.
392 if (i == string::npos)
395 if (len[0] == '\\') {
396 // We had something like \textwidth without a factor
// The numeric factor is everything before the unit part.
402 value = trimSpaceAndEol(string(length, 0, i));
406 // 'cM' is a valid LaTeX length unit. Change it to 'cm'
// Length variables like \textwidth keep their case, real units are
// lowercased.
407 if (contains(len, '\\'))
408 unit = trimSpaceAndEol(string(len, i));
410 unit = ascii_lowercase(trimSpaceAndEol(string(len, i)));
415 /// A simple function to translate a latex length to something LyX can
416 /// understand. Not perfect, but rather best-effort.
417 bool translate_len(string const & length, string & valstring, string & unit)
418 if (!splitLatexLength(length, valstring, unit))
421 // LyX uses percent values
423 istringstream iss(valstring);
// percentval holds the factor converted to a percentage string.
428 string const percentval = oss.str();
// Only length variables (starting with '\') are mapped to LyX percent
// units; real units are passed through.
430 if (unit.empty() || unit[0] != '\\')
// Anything after the first space in the unit (e.g. glue) is kept
// verbatim in endlen.
432 string::size_type const i = unit.find(' ');
433 string const endlen = (i == string::npos) ? string() : string(unit, i);
434 if (unit == "\\textwidth") {
435 valstring = percentval;
436 unit = "text%" + endlen;
437 } else if (unit == "\\columnwidth") {
438 valstring = percentval;
439 unit = "col%" + endlen;
440 } else if (unit == "\\paperwidth") {
441 valstring = percentval;
442 unit = "page%" + endlen;
443 } else if (unit == "\\linewidth") {
444 valstring = percentval;
445 unit = "line%" + endlen;
446 } else if (unit == "\\paperheight") {
447 valstring = percentval;
448 unit = "pheight%" + endlen;
449 } else if (unit == "\\textheight") {
450 valstring = percentval;
451 unit = "theight%" + endlen;
452 } else if (unit == "\\baselineskip") {
453 valstring = percentval;
454 unit = "baselineskip%" + endlen;
460 /// If we have ambiguous quotation marks, make a smart guess
461 /// based on main quote style
462 string guessQuoteStyle(string in, bool const opening)
// The result codes are LyX quote insets ("eld", "ars", ...); each branch
// remaps an ambiguous glyph according to preamble.quotesStyle().
// NOTE(review): some assignment lines are missing in this extraction.
465 if (prefixIs(in, "qr")) {// straight quote
// An opening straight quote is coded as the "left" variant.
467 res = subst(res, "r", "l");
468 } else if (in == "eld") {// ``
469 if (preamble.quotesStyle() == "german")
471 else if (preamble.quotesStyle() == "british")
473 else if (preamble.quotesStyle() == "french")
475 else if (preamble.quotesStyle() == "russian")
477 } else if (in == "erd") {// ''
478 if (preamble.quotesStyle() == "polish")
480 else if (preamble.quotesStyle() == "british")
482 else if (preamble.quotesStyle() == "french")
484 else if (preamble.quotesStyle() == "swedish")
485 res = opening ? "sld" : "srd";
486 } else if (in == "els") {// `
487 if (preamble.quotesStyle() == "german")
489 else if (preamble.quotesStyle() == "british")
491 } else if (in == "ers") {// '
492 if (preamble.quotesStyle() == "polish")
494 else if (preamble.quotesStyle() == "british")
496 else if (preamble.quotesStyle() == "swedish")
497 res = opening ? "sls" : "srs";
498 } else if (in == "ard") {// >>
499 if (preamble.quotesStyle() == "swiss")
501 else if (preamble.quotesStyle() == "french")
503 else if (preamble.quotesStyle() == "russian")
505 } else if (in == "ald") {// <<
506 if (preamble.quotesStyle() == "swiss")
508 else if (preamble.quotesStyle() == "french")
510 else if (preamble.quotesStyle() == "russian")
512 } else if (in == "ars") {// >
513 if (preamble.quotesStyle() == "swiss")
515 } else if (in == "als") {// <
516 if (preamble.quotesStyle() == "swiss")
518 } else if (in == "gld") {// ,,
519 if (preamble.quotesStyle() == "polish")
521 else if (preamble.quotesStyle() == "russian")
523 } else if (in == "gls") {// ,
524 if (preamble.quotesStyle() == "polish")
/// Convenience wrapper: translate a LaTeX length and return it as a
/// single string (value + unit).
534 string translate_len(string const & length)
538 if (translate_len(length, value, unit))
540 // If the input is invalid, return what we have.
548 * Translates a LaTeX length into \p value, \p unit and
549 * \p special parts suitable for a box inset.
550 * The difference from translate_len() is that a box inset knows about
551 * some special "units" that are stored in \p special.
553 void translate_box_len(string const & length, string & value, string & unit, string & special)
555 if (translate_len(length, value, unit)) {
// \height, \depth, \totalheight and \width are not real units but box
// specials; strip the backslash and store them separately.
556 if (unit == "\\height" || unit == "\\depth" ||
557 unit == "\\totalheight" || unit == "\\width") {
558 special = unit.substr(1);
559 // The unit is not used, but LyX requires a dummy setting
/// Write the opening "\begin_inset <name>" line of a LyX inset.
571 void begin_inset(ostream & os, string const & name)
573 os << "\n\\begin_inset " << name;
/// Write the opening of a LyX CommandInset of type \p name backed by
/// the LaTeX command \p latexname.
577 void begin_command_inset(ostream & os, string const & name,
578 string const & latexname)
580 begin_inset(os, "CommandInset ");
581 os << name << "\nLatexCommand " << latexname << '\n';
/// Write the closing "\end_inset" line of a LyX inset.
585 void end_inset(ostream & os)
587 os << "\n\\end_inset\n\n";
/// Skip an empty brace pair "{}" in the token stream, if present.
/// \returns whether braces were skipped (lines partially missing here).
591 bool skip_braces(Parser & p)
// Nothing to do unless the next token opens a group.
593 if (p.next_token().cat() != catBegin)
// Only an immediately following closing brace qualifies as "{}".
596 if (p.next_token().cat() == catEnd) {
605 /// replace LaTeX commands in \p s from the unicodesymbols file with their
607 pair<bool, docstring> convert_unicodesymbols(docstring s)
// Walk through the string, converting one LaTeX command at a time.
611 for (size_t i = 0; i < s.size();) {
// Ask the encodings machinery to translate the command; rem receives
// the unconverted remainder, req the packages the symbol requires.
620 docstring parsed = normalize_c(encodings.fromLaTeXCommand(s,
621 Encodings::TEXT_CMD, termination, rem, &req));
// Register every package the converted symbols need.
622 set<string>::const_iterator it = req.begin();
623 set<string>::const_iterator en = req.end();
624 for (; it != en; ++it)
625 preamble.registerAutomaticallyLoadedPackage(*it);
628 if (s.empty() || s[0] != '\\')
// Escaped specials like \& are passed through, not treated as symbols.
632 for (auto const & c : known_escaped_chars)
633 if (c != 0 && prefixIs(s, from_ascii("\\") + c))
638 return make_pair(res, os.str());
642 /// try to convert \p s to a valid InsetCommand argument
643 /// return whether this succeeded. If not, these command insets
644 /// get the "literate" flag.
645 pair<bool, string> convert_latexed_command_inset_arg(string s)
647 bool success = false;
649 // since we don't know the input encoding we can't use from_utf8
650 pair<bool, docstring> res = convert_unicodesymbols(from_ascii(s));
652 s = to_utf8(res.second);
654 // LyX cannot handle newlines in a latex command
655 return make_pair(success, subst(s, "\n", " "));
658 /// try to convert \p s to a valid InsetCommand argument
659 /// without trying to recode macros.
660 string convert_literate_command_inset_arg(string s)
662 // LyX cannot handle newlines in a latex command
663 return subst(s, "\n", " ");
/// Write \p s as ERT content, escaping backslashes and turning
/// newlines into paragraph breaks.
666 void output_ert(ostream & os, string const & s, Context & context)
668 context.check_layout(os);
669 for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
// Backslash needs LyX's \backslash encoding.
671 os << "\n\\backslash\n";
672 else if (*it == '\n') {
// A newline starts a fresh paragraph inside the ERT.
673 context.new_paragraph(os);
674 context.check_layout(os);
678 context.check_end_layout(os);
/// Wrap \p s in a collapsed ERT inset and write it to \p os.
682 void output_ert_inset(ostream & os, string const & s, Context & context)
684 // We must have a valid layout before outputting the ERT inset.
685 context.check_layout(os);
686 Context newcontext(true, context.textclass);
687 InsetLayout const & layout = context.textclass.insetLayout(from_ascii("ERT"));
// ERT contents use the plain layout when the inset layout demands it.
688 if (layout.forcePlainLayout())
689 newcontext.layout = &context.textclass.plainLayout();
690 begin_inset(os, "ERT");
691 os << "\nstatus collapsed\n";
692 output_ert(os, s, newcontext);
/// Output a LaTeX comment "%<s>" as ERT. The trailing newline is kept
/// out of the inset when the comment is already followed by one.
697 void output_comment(Parser & p, ostream & os, string const & s,
700 if (p.next_token().cat() == catNewline)
701 output_ert_inset(os, '%' + s, context);
703 output_ert_inset(os, '%' + s + '\n', context);
/// Find the layout for \p name in \p textclass, loading a module that
/// provides it if necessary. Returns null if none is found.
707 Layout const * findLayout(TextClass const & textclass, string const & name, bool command,
708 string const & latexparam = string())
710 Layout const * layout = findLayoutWithoutModule(textclass, name, command, latexparam);
// Retry after loading a module that defines the layout.
713 if (checkModule(name, command))
714 return findLayoutWithoutModule(textclass, name, command, latexparam);
/// Find the inset layout for \p name in \p textclass, loading a module
/// that provides it if necessary. Returns null if none is found.
719 InsetLayout const * findInsetLayout(TextClass const & textclass, string const & name, bool command,
720 string const & latexparam = string())
722 InsetLayout const * insetlayout =
723 findInsetLayoutWithoutModule(textclass, name, command, latexparam);
// Retry after loading a module that defines the inset layout.
726 if (checkModule(name, command))
727 return findInsetLayoutWithoutModule(textclass, name, command, latexparam);
// Forward declaration; defined later in this file.
732 void eat_whitespace(Parser &, ostream &, Context &, bool);
736 * Skips whitespace and braces.
737 * This should be called after a command has been parsed that is not put into
738 * ERT, and where LyX adds "{}" if needed.
740 void skip_spaces_braces(Parser & p, bool keepws = false)
742 /* The following four examples produce the same typeset output and
743 should be handled by this function:
751 // Unfortunately we need to skip comments, too.
752 // We can't use eat_whitespace since writing them after the {}
753 // results in different output in some cases.
754 bool const skipped_spaces = p.skip_spaces(true);
755 bool const skipped_braces = skip_braces(p);
756 if (keepws && skipped_spaces && !skipped_braces)
757 // put back the space (it is better handled by check_space)
758 p.unskip_spaces(true);
/// Parse the InsetArgument arguments described by \p latexargs from the
/// input and write them as "Argument" insets. \p prefix distinguishes
/// pre/post command arguments.
/// NOTE(review): extraction gaps — some lines of this body are missing.
762 void output_arguments(ostream & os, Parser & p, bool outer, bool need_layout, string const prefix,
763 Context & context, Layout::LaTeXArgMap const & latexargs)
765 if (context.layout->latextype != LATEX_ITEM_ENVIRONMENT || !prefix.empty()) {
767 context.check_layout(os);
// Iterate over all declared arguments in layout order.
773 Layout::LaTeXArgMap::const_iterator lait = latexargs.begin();
774 Layout::LaTeXArgMap::const_iterator const laend = latexargs.end();
775 for (; lait != laend; ++lait) {
777 eat_whitespace(p, os, context, false);
778 if (lait->second.mandatory) {
// A mandatory argument must start with a group-opening token.
779 if (p.next_token().cat() != catBegin)
781 string ldelim = to_utf8(lait->second.ldelim);
782 string rdelim = to_utf8(lait->second.rdelim);
787 p.get_token(); // eat ldelim
788 if (ldelim.size() > 1)
789 p.get_token(); // eat ldelim
791 context.check_layout(os);
794 begin_inset(os, "Argument ");
797 os << i << "\nstatus collapsed\n\n";
798 parse_text_in_inset(p, os, FLAG_RDELIM, outer, context, 0, rdelim);
// Optional argument: custom delimiters possible.
801 string ldelim = to_utf8(lait->second.ldelim);
802 string rdelim = to_utf8(lait->second.rdelim);
807 string tok = p.next_token().asInput();
808 // we only support delimiters with max 2 chars for now.
809 if (ldelim.size() > 1)
810 tok += p.next_next_token().asInput();
// Absent optional argument: nothing to output for it.
811 if (p.next_token().cat() == catEscape || tok != ldelim)
813 p.get_token(); // eat ldelim
814 if (ldelim.size() > 1)
815 p.get_token(); // eat ldelim
817 context.check_layout(os);
820 begin_inset(os, "Argument ");
823 os << i << "\nstatus collapsed\n\n";
824 parse_text_in_inset(p, os, FLAG_RDELIM, outer, context, 0, rdelim);
827 eat_whitespace(p, os, context, false);
/// Parse a LaTeX command that maps to a paragraph layout (e.g. a section
/// heading) and emit it with the proper \begin_layout/\end_layout pair.
832 void output_command_layout(ostream & os, Parser & p, bool outer,
833 Context & parent_context,
834 Layout const * newlayout)
836 TeXFont const oldFont = parent_context.font;
837 // save the current font size
838 string const size = oldFont.size;
839 // reset the font size to default, because the font size switches
840 // don't affect section headings and the like
841 parent_context.font.size = Context::normalfont.size;
842 // we only need to write the font change if we have an open layout
843 if (!parent_context.atParagraphStart())
844 output_font_change(os, oldFont, parent_context.font);
845 parent_context.check_end_layout(os);
// Child context for the new layout, inheriting class and font.
846 Context context(true, parent_context.textclass, newlayout,
847 parent_context.layout, parent_context.font);
848 if (parent_context.deeper_paragraph) {
849 // We are beginning a nested environment after a
850 // deeper paragraph inside the outer list environment.
851 // Therefore we don't need to output a "begin deeper".
852 context.need_end_deeper = true;
854 context.check_deeper(os);
// Pre-command arguments come before the command body.
855 output_arguments(os, p, outer, true, string(), context,
856 context.layout->latexargs());
857 // If we have a latex param, we eat it here.
858 if (!parent_context.latexparam.empty()) {
860 Context dummy(true, parent_context.textclass);
861 parse_text(p, oss, FLAG_RDELIM, outer, dummy,
862 string(1, parent_context.latexparam.back()));
864 parse_text(p, os, FLAG_ITEM, outer, context);
// Post-command arguments follow the body.
865 output_arguments(os, p, outer, false, "post", context,
866 context.layout->postcommandargs());
867 context.check_end_layout(os);
868 if (parent_context.deeper_paragraph) {
869 // We must suppress the "end deeper" because we
870 // suppressed the "begin deeper" above.
871 context.need_end_deeper = false;
873 context.check_end_deeper(os);
874 // We don't need really a new paragraph, but
875 // we must make sure that the next item gets a \begin_layout.
876 parent_context.new_paragraph(os);
877 // Set the font size to the original value. No need to output it here
878 // (Context::begin_layout() will do that if needed)
879 parent_context.font.size = size;
884 * Output a space if necessary.
885 * This function gets called for every whitespace token.
887 * We have three cases here:
888 * 1. A space must be suppressed. Example: The lyxcode case below
889 * 2. A space may be suppressed. Example: Spaces before "\par"
890 * 3. A space must not be suppressed. Example: A space between two words
892 * We currently handle only 1. and 3 and from 2. only the case of
893 * spaces before newlines as a side effect.
895 * 2. could be used to suppress as many spaces as possible. This has two effects:
896 * - Reimporting LyX generated LaTeX files changes almost no whitespace
897 * - Superflous whitespace from non LyX generated LaTeX files is removed.
898 * The drawback is that the logic inside the function becomes
899 * complicated, and that is the reason why it is not implemented.
901 void check_space(Parser & p, ostream & os, Context & context)
903 Token const next = p.next_token();
904 Token const curr = p.curr_token();
905 // A space before a single newline and vice versa must be ignored
906 // LyX emits a newline before \end{lyxcode}.
907 // This newline must be ignored,
908 // otherwise LyX will add an additional protected space.
909 if (next.cat() == catSpace ||
910 next.cat() == catNewline ||
911 (next.cs() == "end" && context.layout->free_spacing && curr.cat() == catNewline)) {
914 context.check_layout(os);
920 * Parse all arguments of \p command
922 void parse_arguments(string const & command,
923 vector<ArgumentType> const & template_arguments,
924 Parser & p, ostream & os, bool outer, Context & context)
// ert accumulates the raw LaTeX that must be reproduced verbatim.
926 string ert = command;
927 size_t no_arguments = template_arguments.size();
928 for (size_t i = 0; i < no_arguments; ++i) {
929 switch (template_arguments[i]) {
932 // This argument contains regular LaTeX
// Emit the accumulated ERT plus the opening brace, then parse
// the argument's contents as normal text.
933 output_ert_inset(os, ert + '{', context);
934 eat_whitespace(p, os, context, false);
935 if (template_arguments[i] == required)
936 parse_text(p, os, FLAG_ITEM, outer, context);
938 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
942 // This argument consists only of a single item.
943 // The presence of '{' or not must be preserved.
945 if (p.next_token().cat() == catBegin)
946 ert += '{' + p.verbatim_item() + '}';
948 ert += p.verbatim_item();
952 // This argument may contain special characters
953 ert += '{' + p.verbatim_item() + '}';
957 // true because we must not eat whitespace
958 // if an optional arg follows we must not strip the
959 // brackets from this one
960 if (i < no_arguments - 1 &&
961 template_arguments[i+1] == optional)
962 ert += p.getFullOpt(true);
964 ert += p.getOpt(true);
// Flush whatever ERT is still pending.
968 output_ert_inset(os, ert, context);
973 * Check whether \p command is a known command. If yes,
974 * handle the command with all arguments.
975 * \return true if the command was parsed, false otherwise.
977 bool parse_command(string const & command, Parser & p, ostream & os,
978 bool outer, Context & context)
980 if (known_commands.find(command) != known_commands.end()) {
// Delegate to parse_arguments() with the command's argument template.
981 parse_arguments(command, known_commands[command], p, os,
989 /// Parses a minipage or parbox
/// Handles the many combinations of outer box commands (\fbox, \colorbox,
/// framed, ...) and inner boxes (minipage, parbox, makebox, shaded).
/// NOTE(review): extraction gaps — several lines of this body are missing;
/// the function also continues past the end of this chunk.
990 void parse_box(Parser & p, ostream & os, unsigned outer_flags,
991 unsigned inner_flags, bool outer, Context & parent_context,
992 string const & outer_type, string const & special,
993 string inner_type, string const & frame_color,
994 string const & background_color)
// Defaults for the LyX box parameters, overridden below as arguments
// are read from the input.
998 string hor_pos = "l";
999 // We need to set the height to the LaTeX default of 1\\totalheight
1000 // for the case when no height argument is given
1001 string height_value = "1";
1002 string height_unit = "in";
1003 string height_special = "totalheight";
1004 string latex_height;
1008 string width_special = "none";
// Frame thickness: use the current \fboxrule if one was set.
1009 string thickness = "0.4pt";
1010 if (!fboxrule.empty())
1011 thickness = fboxrule;
1013 thickness = "0.4pt";
1015 if (!fboxsep.empty())
1016 separation = fboxsep;
1020 if (!shadow_size.empty())
1021 shadowsize = shadow_size;
// Frame/background colors, overridden by explicit arguments.
1024 string framecolor = "black";
1025 string backgroundcolor = "none";
1026 if (!frame_color.empty())
1027 framecolor = frame_color;
1028 if (!background_color.empty())
1029 backgroundcolor = background_color;
1030 // if there is a color box around the \begin statements have not yet been parsed
1032 if (!frame_color.empty() || !background_color.empty()) {
1033 eat_whitespace(p, os, parent_context, false);
1034 p.get_token().asInput(); // the '{'
// Detect which inner box follows the color box, if any.
1036 if (p.next_token().asInput() == "\\begin") {
1037 p.get_token().asInput();
1039 inner_type = "minipage";
1040 inner_flags = FLAG_END;
1041 active_environments.push_back("minipage");
1044 else if (p.next_token().asInput() == "\\parbox") {
1045 p.get_token().asInput();
1046 inner_type = "parbox";
1047 inner_flags = FLAG_ITEM;
1050 else if (p.next_token().asInput() == "\\makebox") {
1051 p.get_token().asInput();
1052 inner_type = "makebox";
1053 inner_flags = FLAG_ITEM;
1055 // in case there is just \colorbox{color}{text}
1058 inner_type = "makebox";
1059 inner_flags = FLAG_BRACE_LAST;
1064 if (!p.hasOpt() && (inner_type == "makebox" || outer_type == "mbox"))
// Read the optional arguments of the inner box.
1066 if (!inner_type.empty() && p.hasOpt()) {
1067 if (inner_type != "makebox")
1068 position = p.getArg('[', ']');
1070 latex_width = p.getArg('[', ']');
1071 translate_box_len(latex_width, width_value, width_unit, width_special);
// Validate the vertical position argument.
1074 if (position != "t" && position != "c" && position != "b") {
1075 cerr << "invalid position " << position << " for "
1076 << inner_type << endl;
1080 if (inner_type != "makebox") {
1081 latex_height = p.getArg('[', ']');
1082 translate_box_len(latex_height, height_value, height_unit, height_special);
1084 string const opt = p.getArg('[', ']');
// Validate the horizontal alignment argument.
1087 if (hor_pos != "l" && hor_pos != "c" &&
1088 hor_pos != "r" && hor_pos != "s") {
1089 cerr << "invalid hor_pos " << hor_pos
1090 << " for " << inner_type << endl;
1097 inner_pos = p.getArg('[', ']');
// Invalid inner position falls back to the outer position.
1098 if (inner_pos != "c" && inner_pos != "t" &&
1099 inner_pos != "b" && inner_pos != "s") {
1100 cerr << "invalid inner_pos "
1101 << inner_pos << " for "
1102 << inner_type << endl;
1103 inner_pos = position;
1107 if (inner_type == "makebox")
// No inner box: the width comes from the outer box arguments.
1111 if (inner_type.empty()) {
1112 if (special.empty() && outer_type != "framebox")
1113 latex_width = "1\\columnwidth";
1116 latex_width = p2.getArg('[', ']');
1117 string const opt = p2.getArg('[', ']');
1120 if (hor_pos != "l" && hor_pos != "c" &&
1121 hor_pos != "r" && hor_pos != "s") {
1122 cerr << "invalid hor_pos " << hor_pos
1123 << " for " << outer_type << endl;
1127 if (outer_type == "framebox")
1131 } else if (inner_type != "makebox")
1132 latex_width = p.verbatim_item();
1133 // if e.g. only \ovalbox{content} was used, set the width to 1\columnwidth
1134 // as this is LyX's standard for such cases (except for makebox)
1135 // \framebox is more special and handled below
1136 if (latex_width.empty() && inner_type != "makebox"
1137 && outer_type != "framebox")
1138 latex_width = "1\\columnwidth";
1140 translate_len(latex_width, width_value, width_unit);
1142 bool shadedparbox = false;
// A shaded inner box needs its environment opening consumed here.
1143 if (inner_type == "shaded") {
1144 eat_whitespace(p, os, parent_context, false);
1145 if (outer_type == "parbox") {
1147 if (p.next_token().cat() == catBegin)
1149 eat_whitespace(p, os, parent_context, false);
1150 shadedparbox = true;
1155 // If we already read the inner box we have to push the inner env
1156 if (!outer_type.empty() && !inner_type.empty() &&
1157 (inner_flags & FLAG_END))
1158 active_environments.push_back(inner_type);
1159 bool use_ert = false;
1160 if (!outer_type.empty() && !inner_type.empty()) {
1161 // Look whether there is some content after the end of the
1162 // inner box, but before the end of the outer box.
1163 // If yes, we need to output ERT.
1165 if (inner_flags & FLAG_END)
1166 p.ertEnvironment(inner_type);
1169 p.skip_spaces(true);
1170 bool const outer_env(outer_type == "framed" || outer_type == "minipage");
1171 if ((outer_env && p.next_token().asInput() != "\\end") ||
1172 (!outer_env && p.next_token().cat() != catEnd)) {
1173 // something is between the end of the inner box and
1174 // the end of the outer box, so we need to use ERT.
// Reconstruct the outer box command as raw LaTeX for the ERT path.
1182 if (!outer_type.empty()) {
1183 if (outer_flags & FLAG_END)
1184 ss << "\\begin{" << outer_type << '}';
1186 ss << '\\' << outer_type << '{';
1187 if (!special.empty())
1191 if (!inner_type.empty()) {
1192 if (inner_type != "shaded") {
1193 if (inner_flags & FLAG_END)
1194 ss << "\\begin{" << inner_type << '}';
1196 ss << '\\' << inner_type;
1198 if (!position.empty())
1199 ss << '[' << position << ']';
1200 if (!latex_height.empty())
1201 ss << '[' << latex_height << ']';
1202 if (!inner_pos.empty())
1203 ss << '[' << inner_pos << ']';
1204 ss << '{' << latex_width << '}';
1205 if (!(inner_flags & FLAG_END))
1208 if (inner_type == "shaded")
1209 ss << "\\begin{shaded}";
1210 output_ert_inset(os, ss.str(), parent_context);
1211 if (!inner_type.empty()) {
1212 parse_text(p, os, inner_flags, outer, parent_context);
1213 if (inner_flags & FLAG_END)
1214 output_ert_inset(os, "\\end{" + inner_type + '}',
1217 output_ert_inset(os, "}", parent_context);
1219 if (!outer_type.empty()) {
1220 // If we already read the inner box we have to pop
1222 if (!inner_type.empty() && (inner_flags & FLAG_END))
1223 active_environments.pop_back();
1225 // Ensure that the end of the outer box is parsed correctly:
1226 // The opening brace has been eaten by parse_outer_box()
1227 if (!outer_type.empty() && (outer_flags & FLAG_ITEM)) {
1228 outer_flags &= ~FLAG_ITEM;
1229 outer_flags |= FLAG_BRACE_LAST;
1231 parse_text(p, os, outer_flags, outer, parent_context);
1232 if (outer_flags & FLAG_END)
1233 output_ert_inset(os, "\\end{" + outer_type + '}',
1236 output_ert_inset(os, "}", parent_context);
1239 // LyX does not like empty positions, so we have
1240 // to set them to the LaTeX default values here.
1241 if (position.empty())
1243 if (inner_pos.empty())
1244 inner_pos = position;
1245 parent_context.check_layout(os);
1246 begin_inset(os, "Box ");
1247 if (outer_type == "framed")
1249 else if (outer_type == "framebox" || outer_type == "fbox" || !frame_color.empty())
1251 else if (outer_type == "shadowbox")
1252 os << "Shadowbox\n";
1253 else if ((outer_type == "shaded" && inner_type.empty()) ||
1254 (outer_type == "minipage" && inner_type == "shaded") ||
1255 (outer_type == "parbox" && inner_type == "shaded")) {
1257 preamble.registerAutomaticallyLoadedPackage("color");
1258 } else if (outer_type == "doublebox")
1259 os << "Doublebox\n";
1260 else if (outer_type.empty() || outer_type == "mbox")
1261 os << "Frameless\n";
1263 os << outer_type << '\n';
1264 os << "position \"" << position << "\"\n";
1265 os << "hor_pos \"" << hor_pos << "\"\n";
1266 if (outer_type == "mbox")
1267 os << "has_inner_box 1\n";
1268 else if (!frame_color.empty() && inner_type == "makebox")
1269 os << "has_inner_box 0\n";
1271 os << "has_inner_box " << !inner_type.empty() << "\n";
1272 os << "inner_pos \"" << inner_pos << "\"\n";
1273 os << "use_parbox " << (inner_type == "parbox" || shadedparbox)
1275 if (outer_type == "mbox")
1276 os << "use_makebox 1\n";
1277 else if (!frame_color.empty())
1278 os << "use_makebox 0\n";
1280 os << "use_makebox " << (inner_type == "makebox") << '\n';
1281 if (outer_type == "mbox" || (outer_type == "fbox" && inner_type.empty()))
1282 os << "width \"\"\n";
1283 // for values like "1.5\width" LyX uses "1.5in" as width ad sets "width" as sepecial
1284 else if (contains(width_unit, '\\'))
1285 os << "width \"" << width_value << "in" << "\"\n";
1287 os << "width \"" << width_value << width_unit << "\"\n";
1288 if (contains(width_unit, '\\')) {
1289 width_unit.erase (0,1); // remove the leading '\'
1290 os << "special \"" << width_unit << "\"\n";
1292 os << "special \"" << width_special << "\"\n";
1293 if (contains(height_unit, '\\'))
1294 os << "height \"" << height_value << "in" << "\"\n";
1296 os << "height \"" << height_value << height_unit << "\"\n";
1297 os << "height_special \"" << height_special << "\"\n";
1298 os << "thickness \"" << thickness << "\"\n";
1299 os << "separation \"" << separation << "\"\n";
1300 os << "shadowsize \"" << shadowsize << "\"\n";
1301 os << "framecolor \"" << framecolor << "\"\n";
1302 os << "backgroundcolor \"" << backgroundcolor << "\"\n";
1303 os << "status open\n\n";
1305 // Unfortunately we can't use parse_text_in_inset:
1306 // InsetBox::forcePlainLayout() is hard coded and does not
1307 // use the inset layout. Apart from that do we call parse_text
1308 // up to two times, but need only one check_end_layout.
1309 bool const forcePlainLayout =
1310 (!inner_type.empty() || inner_type == "makebox") &&
1311 outer_type != "shaded" && outer_type != "framed";
1312 Context context(true, parent_context.textclass);
1313 if (forcePlainLayout)
1314 context.layout = &context.textclass.plainLayout();
1316 context.font = parent_context.font;
1318 // If we have no inner box the contents will be read with the outer box
1319 if (!inner_type.empty())
1320 parse_text(p, os, inner_flags, outer, context);
1322 // Ensure that the end of the outer box is parsed correctly:
1323 // The opening brace has been eaten by parse_outer_box()
1324 if (!outer_type.empty() && (outer_flags & FLAG_ITEM)) {
1325 outer_flags &= ~FLAG_ITEM;
1326 outer_flags |= FLAG_BRACE_LAST;
1329 // Find end of outer box, output contents if inner_type is
1330 // empty and output possible comments
1331 if (!outer_type.empty()) {
1332 // If we already read the inner box we have to pop
1334 if (!inner_type.empty() && (inner_flags & FLAG_END))
1335 active_environments.pop_back();
1336 // This does not output anything but comments if
1337 // inner_type is not empty (see use_ert)
1338 parse_text(p, os, outer_flags, outer, context);
1341 context.check_end_layout(os);
1343 #ifdef PRESERVE_LAYOUT
1344 // LyX puts a % after the end of the minipage
1345 if (p.next_token().cat() == catNewline && p.next_token().cs().size() > 1) {
1347 //output_comment(p, os, "dummy", parent_context);
1350 parent_context.new_paragraph(os);
1352 else if (p.next_token().cat() == catSpace || p.next_token().cat() == catNewline) {
1353 //output_comment(p, os, "dummy", parent_context);
1356 // We add a protected space if something real follows
1357 if (p.good() && p.next_token().cat() != catComment) {
1358 begin_inset(os, "space ~\n");
1364 if (inner_type == "minipage" && (!frame_color.empty() || !background_color.empty()))
1365 active_environments.pop_back();
1366 if (inner_flags != FLAG_BRACE_LAST && (!frame_color.empty() || !background_color.empty())) {
1367 // in this case we have to eat the the closing brace of the color box
1368 p.get_token().asInput(); // the '}'
1370 if (p.next_token().asInput() == "}") {
1371 // in this case we assume that the closing brace is from the box settings
1372 // therefore reset these values for the next box
1378 // all boxes except of Frameless and Shaded require calc
1379 if (!(outer_type.empty() || outer_type == "mbox") &&
1380 !((outer_type == "shaded" && inner_type.empty()) ||
1381 (outer_type == "minipage" && inner_type == "shaded") ||
1382 (outer_type == "parbox" && inner_type == "shaded")))
1383 preamble.registerAutomaticallyLoadedPackage("calc");
/// Parses an outer box command/environment (e.g. \fbox, \framebox, minipage,
/// framed, shaded, mbox), detects a possible inner box (\parbox, or a
/// minipage/shaded environment) by lookahead, and delegates the actual
/// conversion to parse_box() with the appropriate inner flags.
/// \p special carries extra arguments already read by the caller.
1387 void parse_outer_box(Parser & p, ostream & os, unsigned flags, bool outer,
1388 Context & parent_context, string const & outer_type,
1389 string const & special)
1391 eat_whitespace(p, os, parent_context, false);
1392 if (flags & FLAG_ITEM) {
1394 if (p.next_token().cat() == catBegin)
// Be lenient: a missing '{' is reported but parsing continues.
1397 cerr << "Warning: Ignoring missing '{' after \\"
1398 << outer_type << '.' << endl;
1399 eat_whitespace(p, os, parent_context, false);
1402 unsigned int inner_flags = 0;
1404 if (outer_type == "minipage" || outer_type == "parbox") {
// Swallow the optional arguments of the outer minipage/parbox here;
// parse_box() re-reads the arguments of the inner box itself.
1405 p.skip_spaces(true);
1406 while (p.hasOpt()) {
1408 p.skip_spaces(true);
1411 p.skip_spaces(true);
1412 if (outer_type == "parbox") {
1414 if (p.next_token().cat() == catBegin)
1416 p.skip_spaces(true);
1419 if (outer_type == "shaded" || outer_type == "mbox") {
1420 // These boxes never have an inner box
1422 } else if (p.next_token().asInput() == "\\parbox") {
1423 inner = p.get_token().cs();
1424 inner_flags = FLAG_ITEM;
1425 } else if (p.next_token().asInput() == "\\begin") {
1426 // Is this a minipage or shaded box?
1429 inner = p.getArg('{', '}');
1431 if (inner == "minipage" || inner == "shaded")
1432 inner_flags = FLAG_END;
1437 if (inner_flags == FLAG_END) {
1438 if (inner != "shaded")
1442 eat_whitespace(p, os, parent_context, false);
1444 parse_box(p, os, flags, FLAG_END, outer, parent_context,
1445 outer_type, special, inner, "", "");
1447 if (inner_flags == FLAG_ITEM) {
1449 eat_whitespace(p, os, parent_context, false);
1451 parse_box(p, os, flags, inner_flags, outer, parent_context,
1452 outer_type, special, inner, "", "");
/// Converts a listings/minted environment (or inline \lstinline) into a LyX
/// "listings" inset. Handles the optional parameter list, minted floats and
/// minted captions placed above ([t]) or below ([b]) the code.
/// \p in_line selects the inline form; \p use_minted selects minted syntax.
1457 void parse_listings(Parser & p, ostream & os, Context & parent_context,
1458 bool in_line, bool use_minted)
1460 parent_context.check_layout(os);
1461 begin_inset(os, "listings\n");
1462 string arg = p.hasOpt() ? subst(p.verbatimOption(), "\n", "") : string();
// Normalize the option list: drop the whitespace after each comma so that
// the lstparams value has a canonical "a,b,c" form.
1464 while ((i = arg.find(", ")) != string::npos
1465 || (i = arg.find(",\t")) != string::npos)
1466 arg.erase(i + 1, 1);
1469 string const language = p.getArg('{', '}');
1470 p.skip_spaces(true);
1471 arg += string(arg.empty() ? "" : ",") + "language=" + language;
1472 if (!minted_float.empty()) {
1473 arg += string(arg.empty() ? "" : ",") + minted_float;
1474 minted_nonfloat_caption.clear();
1478 os << "lstparams " << '"' << arg << '"' << '\n';
1479 if (arg.find("\\color") != string::npos)
1480 preamble.registerAutomaticallyLoadedPackage("color");
1483 os << "inline true\n";
1485 os << "inline false\n";
1486 os << "status collapsed\n";
1487 Context context(true, parent_context.textclass);
1488 context.layout = &parent_context.textclass.plainLayout();
// A minted caption marked "[t]" is output before the code.
1489 if (use_minted && prefixIs(minted_nonfloat_caption, "[t]")) {
1490 minted_nonfloat_caption.erase(0,3);
1491 os << "\n\\begin_layout Plain Layout\n";
1492 begin_inset(os, "Caption Standard\n");
1493 Context newcontext(true, context.textclass,
1494 context.layout, 0, context.font);
1495 newcontext.check_layout(os);
1496 os << minted_nonfloat_caption << "\n";
1497 newcontext.check_end_layout(os);
1499 os << "\n\\end_layout\n";
1500 minted_nonfloat_caption.clear();
1504 // set catcodes to verbatim early, just in case.
1505 p.setCatcodes(VERBATIM_CATCODES);
1506 string delim = p.get_token().asInput();
1507 //FIXME: handle error condition
1508 s = p.verbatimStuff(delim).second;
1509 // context.new_paragraph(os);
1510 } else if (use_minted) {
1511 s = p.verbatimEnvironment("minted");
1513 s = p.verbatimEnvironment("lstlisting");
1515 output_ert(os, s, context);
// A minted caption marked "[b]" is output after the code.
1516 if (use_minted && prefixIs(minted_nonfloat_caption, "[b]")) {
1517 minted_nonfloat_caption.erase(0,3);
1518 os << "\n\\begin_layout Plain Layout\n";
1519 begin_inset(os, "Caption Standard\n");
1520 Context newcontext(true, context.textclass,
1521 context.layout, 0, context.font);
1522 newcontext.check_layout(os);
1523 os << minted_nonfloat_caption << "\n";
1524 newcontext.check_end_layout(os);
1526 os << "\n\\end_layout\n";
1527 minted_nonfloat_caption.clear();
1529 // Don't close the inset here for floating minted listings.
1530 // It will be closed at the end of the listing environment.
1531 if (!use_minted || minted_float.empty())
1534 eat_whitespace(p, os, parent_context, true);
1535 Token t = p.get_token();
1536 if (t.asInput() != "\\end") {
1537 // If anything follows, collect it into a caption.
1538 minted_float_has_caption = true;
1539 os << "\n\\begin_layout Plain Layout\n"; // outer layout
1540 begin_inset(os, "Caption Standard\n");
1541 os << "\n\\begin_layout Plain Layout\n"; // inner layout
1548 /// parse an unknown environment: the \begin/\end pair is emitted as ERT
1548 /// and the contents are parsed as a normal text snippet in between.
1549 void parse_unknown_environment(Parser & p, string const & name, ostream & os,
1550 unsigned flags, bool outer,
1551 Context & parent_context)
1553 if (name == "tabbing")
1554 // We need to remember that we have to handle '\=' specially
1555 flags |= FLAG_TABBING;
1557 // We need to translate font changes and paragraphs inside the
1558 // environment to ERT if we have a non standard font.
1559 // Otherwise things like
1560 // \large\begin{foo}\huge bar\end{foo}
1562 bool const specialfont =
1563 (parent_context.font != parent_context.normalfont);
1564 bool const new_layout_allowed = parent_context.new_layout_allowed;
1566 parent_context.new_layout_allowed = false;
1567 output_ert_inset(os, "\\begin{" + name + "}", parent_context);
1568 parse_text_snippet(p, os, flags, outer, parent_context);
1569 output_ert_inset(os, "\\end{" + name + "}", parent_context);
// Restore the caller's layout permission after the ERT round-trip.
1571 parent_context.new_layout_allowed = new_layout_allowed;
// Dispatcher for \begin{name}...\end{name}: recognizes math environments,
// polyglossia languages, tabulars, floats, boxes, listings, CJK, alignment
// and spacing pseudo-environments, and finally layouts/inset layouts from
// the text class. Unknown environments fall back to ERT.
// NOTE(review): the function body continues past the end of this view.
1575 void parse_environment(Parser & p, ostream & os, bool outer,
1576 string & last_env, Context & parent_context)
1578 Layout const * newlayout;
1579 InsetLayout const * newinsetlayout = 0;
1580 string const name = p.getArg('{', '}');
1581 const bool is_starred = suffixIs(name, '*');
1582 string const unstarred_name = rtrim(name, "*");
1583 active_environments.push_back(name);
1585 if (is_math_env(name)) {
1586 parent_context.check_layout(os);
1587 begin_inset(os, "Formula ");
1588 os << "\\begin{" << name << "}";
1589 parse_math(p, os, FLAG_END, MATH_MODE);
1590 os << "\\end{" << name << "}";
1592 if (is_display_math_env(name)) {
1593 // Prevent the conversion of a line break to a space
1594 // (bug 7668). This does not change the output, but
1595 // looks ugly in LyX.
1596 eat_whitespace(p, os, parent_context, false);
1600 else if (is_known(name, preamble.polyglossia_languages)) {
1601 // We must begin a new paragraph if not already done
1602 if (! parent_context.atParagraphStart()) {
1603 parent_context.check_end_layout(os);
1604 parent_context.new_paragraph(os);
1606 // save the language in the context so that it is
1607 // handled by parse_text
1608 parent_context.font.language = preamble.polyglossia2lyx(name);
1609 parse_text(p, os, FLAG_END, outer, parent_context);
1610 // Just in case the environment is empty
1611 parent_context.extra_stuff.erase();
1612 // We must begin a new paragraph to reset the language
1613 parent_context.new_paragraph(os);
1617 else if (unstarred_name == "tabular" || name == "longtable") {
1618 eat_whitespace(p, os, parent_context, false);
1619 string width = "0pt";
1620 if (name == "tabular*") {
1621 width = lyx::translate_len(p.getArg('{', '}'));
1622 eat_whitespace(p, os, parent_context, false);
1624 parent_context.check_layout(os);
1625 begin_inset(os, "Tabular ");
1626 handle_tabular(p, os, name, width, parent_context);
// Float environments declared by the text class (figure, table, ...).
1631 else if (parent_context.textclass.floats().typeExist(unstarred_name)) {
1632 eat_whitespace(p, os, parent_context, false);
1633 string const opt = p.hasOpt() ? p.getArg('[', ']') : string();
1634 eat_whitespace(p, os, parent_context, false);
1635 parent_context.check_layout(os);
1636 begin_inset(os, "Float " + unstarred_name + "\n");
1637 // store the float type for subfloats
1638 // subfloats only work with figures and tables
1639 if (unstarred_name == "figure")
1640 float_type = unstarred_name;
1641 else if (unstarred_name == "table")
1642 float_type = unstarred_name;
1646 os << "placement " << opt << '\n';
1647 if (contains(opt, "H"))
1648 preamble.registerAutomaticallyLoadedPackage("float");
1650 Floating const & fl = parent_context.textclass.floats()
1651 .getType(unstarred_name);
1652 if (!fl.floattype().empty() && fl.usesFloatPkg())
1653 preamble.registerAutomaticallyLoadedPackage("float");
1656 os << "wide " << convert<string>(is_starred)
1657 << "\nsideways false"
1658 << "\nstatus open\n\n";
1659 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
1661 // We don't need really a new paragraph, but
1662 // we must make sure that the next item gets a \begin_layout.
1663 parent_context.new_paragraph(os);
1665 // the float is parsed thus delete the type
1669 else if (unstarred_name == "sidewaysfigure"
1670 || unstarred_name == "sidewaystable"
1671 || unstarred_name == "sidewaysalgorithm") {
1672 string const opt = p.hasOpt() ? p.getArg('[', ']') : string();
1673 eat_whitespace(p, os, parent_context, false);
1674 parent_context.check_layout(os);
1675 if (unstarred_name == "sidewaysfigure")
1676 begin_inset(os, "Float figure\n");
1677 else if (unstarred_name == "sidewaystable")
1678 begin_inset(os, "Float table\n");
1679 else if (unstarred_name == "sidewaysalgorithm")
1680 begin_inset(os, "Float algorithm\n");
1682 os << "placement " << opt << '\n';
1683 if (contains(opt, "H"))
1684 preamble.registerAutomaticallyLoadedPackage("float");
1685 os << "wide " << convert<string>(is_starred)
1686 << "\nsideways true"
1687 << "\nstatus open\n\n";
1688 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
1690 // We don't need really a new paragraph, but
1691 // we must make sure that the next item gets a \begin_layout.
1692 parent_context.new_paragraph(os);
1694 preamble.registerAutomaticallyLoadedPackage("rotfloat");
1697 else if (name == "wrapfigure" || name == "wraptable") {
1698 // syntax is \begin{wrapfigure}[lines]{placement}[overhang]{width}
1699 eat_whitespace(p, os, parent_context, false);
1700 parent_context.check_layout(os);
1703 string overhang = "0col%";
1706 lines = p.getArg('[', ']');
1707 string const placement = p.getArg('{', '}');
1709 overhang = p.getArg('[', ']');
1710 string const width = p.getArg('{', '}');
1712 if (name == "wrapfigure")
1713 begin_inset(os, "Wrap figure\n");
1715 begin_inset(os, "Wrap table\n");
1716 os << "lines " << lines
1717 << "\nplacement " << placement
1718 << "\noverhang " << lyx::translate_len(overhang)
1719 << "\nwidth " << lyx::translate_len(width)
1720 << "\nstatus open\n\n";
1721 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
1723 // We don't need really a new paragraph, but
1724 // we must make sure that the next item gets a \begin_layout.
1725 parent_context.new_paragraph(os);
1727 preamble.registerAutomaticallyLoadedPackage("wrapfig");
1730 else if (name == "minipage") {
1731 eat_whitespace(p, os, parent_context, false);
1732 // Test whether this is an outer box of a shaded box
1734 // swallow arguments
1735 while (p.hasOpt()) {
1737 p.skip_spaces(true);
1740 p.skip_spaces(true);
1741 Token t = p.get_token();
1742 bool shaded = false;
1743 if (t.asInput() == "\\begin") {
1744 p.skip_spaces(true);
1745 if (p.getArg('{', '}') == "shaded")
1750 parse_outer_box(p, os, FLAG_END, outer,
1751 parent_context, name, "shaded");
1753 parse_box(p, os, 0, FLAG_END, outer, parent_context,
1754 "", "", name, "", "");
1758 else if (name == "comment") {
1759 eat_whitespace(p, os, parent_context, false);
1760 parent_context.check_layout(os);
1761 begin_inset(os, "Note Comment\n");
1762 os << "status open\n";
1763 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
1766 skip_braces(p); // eat {} that might by set by LyX behind comments
1767 preamble.registerAutomaticallyLoadedPackage("verbatim");
1770 else if (unstarred_name == "verbatim") {
1771 // FIXME: this should go in the generic code that
1772 // handles environments defined in layout file that
1773 // have "PassThru 1". However, the code over there is
1774 // already too complicated for my taste.
1775 string const ascii_name =
1776 (name == "verbatim*") ? "Verbatim*" : "Verbatim";
1777 parent_context.new_paragraph(os);
1778 Context context(true, parent_context.textclass,
1779 &parent_context.textclass[from_ascii(ascii_name)]);
1780 string s = p.verbatimEnvironment(name);
1781 output_ert(os, s, context);
1785 else if (name == "IPA") {
1786 eat_whitespace(p, os, parent_context, false);
1787 parent_context.check_layout(os);
1788 begin_inset(os, "IPA\n");
1789 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
1792 preamble.registerAutomaticallyLoadedPackage("tipa");
1793 preamble.registerAutomaticallyLoadedPackage("tipx");
1796 else if (name == "CJK") {
1797 // the scheme is \begin{CJK}{encoding}{mapping}text\end{CJK}
1798 // It is impossible to decide if a CJK environment was in its own paragraph or within
1799 // a line. We therefore always assume a paragraph since the latter is a rare case.
1800 eat_whitespace(p, os, parent_context, false);
1801 parent_context.check_end_layout(os);
1802 // store the encoding to be able to reset it
1803 string const encoding_old = p.getEncoding();
1804 string const encoding = p.getArg('{', '}');
1805 // FIXME: For some reason JIS does not work. Although the text
1806 // in tests/CJK.tex is identical with the SJIS version if you
1807 // convert both snippets using the recode command line utility,
1808 // the resulting .lyx file contains some extra characters if
1809 // you set buggy_encoding to false for JIS.
1810 bool const buggy_encoding = encoding == "JIS";
1811 if (!buggy_encoding)
1812 p.setEncoding(encoding, Encoding::CJK);
1814 // FIXME: This will read garbage, since the data is not encoded in utf8.
1815 p.setEncoding("UTF-8");
1817 // LyX only supports the same mapping for all CJK
1818 // environments, so we might need to output everything as ERT
1819 string const mapping = trim(p.getArg('{', '}'));
1820 char const * const * const where =
1821 is_known(encoding, supported_CJK_encodings);
1822 if (!buggy_encoding && !preamble.fontCJKSet())
1823 preamble.fontCJK(mapping);
1824 bool knownMapping = mapping == preamble.fontCJK();
1825 if (buggy_encoding || !knownMapping || !where) {
1826 parent_context.check_layout(os);
1827 output_ert_inset(os, "\\begin{" + name + "}{" + encoding + "}{" + mapping + "}",
1829 // we must parse the content as verbatim because e.g. JIS can contain
1830 // normally invalid characters
1831 // FIXME: This works only for the most simple cases.
1832 // Since TeX control characters are not parsed,
1833 // things like comments are completely wrong.
1834 string const s = p.plainEnvironment("CJK");
1835 for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
1838 if (snip == "\\" || is_known(snip, known_escaped_chars))
1839 output_ert_inset(os, snip, parent_context);
1840 else if (*it == '\n' && it + 1 != et && s.begin() + 1 != it)
1845 output_ert_inset(os, "\\end{" + name + "}",
1849 supported_CJK_languages[where - supported_CJK_encodings];
1850 // store the language because we must reset it at the end
1851 string const lang_old = parent_context.font.language;
1852 parent_context.font.language = lang;
1853 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
1854 parent_context.font.language = lang_old;
1855 parent_context.new_paragraph(os);
1857 p.setEncoding(encoding_old);
1861 else if (name == "lyxgreyedout") {
1862 eat_whitespace(p, os, parent_context, false);
1863 parent_context.check_layout(os);
1864 begin_inset(os, "Note Greyedout\n");
1865 os << "status open\n";
1866 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
1869 if (!preamble.notefontcolor().empty())
1870 preamble.registerAutomaticallyLoadedPackage("color");
1873 else if (name == "btSect") {
1874 eat_whitespace(p, os, parent_context, false);
1875 parent_context.check_layout(os);
1876 begin_command_inset(os, "bibtex", "bibtex");
1877 string bibstyle = "plain";
1879 bibstyle = p.getArg('[', ']');
1880 p.skip_spaces(true);
1882 string const bibfile = p.getArg('{', '}');
1883 eat_whitespace(p, os, parent_context, false);
1884 Token t = p.get_token();
1885 if (t.asInput() == "\\btPrintCited") {
1886 p.skip_spaces(true);
1887 os << "btprint " << '"' << "btPrintCited" << '"' << "\n";
1889 if (t.asInput() == "\\btPrintNotCited") {
1890 p.skip_spaces(true);
1891 os << "btprint " << '"' << "btPrintNotCited" << '"' << "\n";
1893 if (t.asInput() == "\\btPrintAll") {
1894 p.skip_spaces(true);
1895 os << "btprint " << '"' << "btPrintAll" << '"' << "\n";
1897 os << "bibfiles " << '"' << bibfile << "\"\n"
1898 << "options " << '"' << bibstyle << "\"\n";
1899 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
1904 else if (name == "framed" || name == "shaded") {
1905 eat_whitespace(p, os, parent_context, false);
1906 parse_outer_box(p, os, FLAG_END, outer, parent_context, name, "");
1908 preamble.registerAutomaticallyLoadedPackage("framed");
1911 else if (name == "listing") {
1912 minted_float = "float";
1913 eat_whitespace(p, os, parent_context, false);
1914 string const opt = p.hasOpt() ? p.getArg('[', ']') : string();
1916 minted_float += "=" + opt;
1917 // If something precedes \begin{minted}, we output it at the end
1918 // as a caption, in order to keep it inside the listings inset.
1919 eat_whitespace(p, os, parent_context, true);
1921 Token const & t = p.get_token();
1922 p.skip_spaces(true);
1923 string const envname = p.next_token().cat() == catBegin
1924 ? p.getArg('{', '}') : string();
1925 bool prologue = t.asInput() != "\\begin" || envname != "minted";
1927 minted_float_has_caption = false;
1928 string content = parse_text_snippet(p, FLAG_END, outer,
1930 size_t i = content.find("\\begin_inset listings");
1931 bool minted_env = i != string::npos;
1934 caption = content.substr(0, i);
1935 content.erase(0, i);
1937 parent_context.check_layout(os);
1938 if (minted_env && minted_float_has_caption) {
1939 eat_whitespace(p, os, parent_context, true);
1940 os << content << "\n";
1941 if (!caption.empty())
1942 os << caption << "\n";
1943 os << "\n\\end_layout\n"; // close inner layout
1944 end_inset(os); // close caption inset
1945 os << "\n\\end_layout\n"; // close outer layout
1946 } else if (!caption.empty()) {
1948 begin_inset(os, "listings\n");
1949 os << "lstparams " << '"' << minted_float << '"' << '\n';
1950 os << "inline false\n";
1951 os << "status collapsed\n";
1953 os << "\n\\begin_layout Plain Layout\n";
1954 begin_inset(os, "Caption Standard\n");
1955 Context newcontext(true, parent_context.textclass,
1956 0, 0, parent_context.font);
1957 newcontext.check_layout(os);
1958 os << caption << "\n";
1959 newcontext.check_end_layout(os);
1961 os << "\n\\end_layout\n";
1962 } else if (content.empty()) {
1963 begin_inset(os, "listings\n");
1964 os << "lstparams " << '"' << minted_float << '"' << '\n';
1965 os << "inline false\n";
1966 os << "status collapsed\n";
1968 os << content << "\n";
1970 end_inset(os); // close listings inset
1971 parent_context.check_end_layout(os);
1972 parent_context.new_paragraph(os);
1974 minted_float.clear();
1975 minted_float_has_caption = false;
1978 else if (name == "lstlisting" || name == "minted") {
1979 bool use_minted = name == "minted";
1980 eat_whitespace(p, os, parent_context, false);
1981 if (use_minted && minted_float.empty()) {
1982 // look ahead for a bottom caption
1984 bool found_end_minted = false;
1985 while (!found_end_minted && p.good()) {
1986 Token const & t = p.get_token();
1988 string const envname =
1989 p.next_token().cat() == catBegin
1990 ? p.getArg('{', '}') : string();
1991 found_end_minted = t.asInput() == "\\end"
1992 && envname == "minted";
1994 eat_whitespace(p, os, parent_context, true);
1995 Token const & t = p.get_token();
1996 p.skip_spaces(true);
1997 if (t.asInput() == "\\lyxmintcaption") {
1998 string const pos = p.getArg('[', ']');
2000 string const caption =
2001 parse_text_snippet(p, FLAG_ITEM,
2002 false, parent_context);
2003 minted_nonfloat_caption = "[b]" + caption;
2008 parse_listings(p, os, parent_context, false, use_minted);
2012 else if (!parent_context.new_layout_allowed)
2013 parse_unknown_environment(p, name, os, FLAG_END, outer,
2016 // Alignment and spacing settings
2017 // FIXME (bug xxxx): These settings can span multiple paragraphs and
2018 // therefore are totally broken!
2019 // Note that \centering, \raggedright, and \raggedleft cannot be handled, as
2020 // they are commands not environments. They are furthermore switches that
2021 // can be ended by another switches, but also by commands like \footnote or
2022 // \parbox. So the only safe way is to leave them untouched.
2023 // However, we support the pseudo-environments
2024 // \begin{centering} ... \end{centering}
2025 // \begin{raggedright} ... \end{raggedright}
2026 // \begin{raggedleft} ... \end{raggedleft}
2027 // since they are used by LyX in floats (for spacing reasons)
2028 else if (name == "center" || name == "centering" ||
2029 name == "flushleft" || name == "raggedright" ||
2030 name == "flushright" || name == "raggedleft" ||
2031 name == "singlespace" || name == "onehalfspace" ||
2032 name == "doublespace" || name == "spacing") {
2033 eat_whitespace(p, os, parent_context, false);
2034 // We must begin a new paragraph if not already done
2035 if (! parent_context.atParagraphStart()) {
2036 parent_context.check_end_layout(os);
2037 parent_context.new_paragraph(os);
2039 if (name == "flushleft" || name == "raggedright")
2040 parent_context.add_extra_stuff("\\align left\n");
2041 else if (name == "flushright" || name == "raggedleft")
2042 parent_context.add_extra_stuff("\\align right\n");
2043 else if (name == "center" || name == "centering")
2044 parent_context.add_extra_stuff("\\align center\n");
2045 else if (name == "singlespace")
2046 parent_context.add_extra_stuff("\\paragraph_spacing single\n");
2047 else if (name == "onehalfspace") {
2048 parent_context.add_extra_stuff("\\paragraph_spacing onehalf\n");
2049 preamble.registerAutomaticallyLoadedPackage("setspace");
2050 } else if (name == "doublespace") {
2051 parent_context.add_extra_stuff("\\paragraph_spacing double\n");
2052 preamble.registerAutomaticallyLoadedPackage("setspace");
2053 } else if (name == "spacing") {
2054 parent_context.add_extra_stuff("\\paragraph_spacing other " + p.verbatim_item() + "\n");
2055 preamble.registerAutomaticallyLoadedPackage("setspace");
2057 parse_text(p, os, FLAG_END, outer, parent_context);
2058 // Just in case the environment is empty
2059 parent_context.extra_stuff.erase();
2060 // We must begin a new paragraph to reset the alignment
2061 parent_context.new_paragraph(os);
2065 // The single '=' is meant here.
2066 else if ((newlayout = findLayout(parent_context.textclass, name, false))) {
2067 eat_whitespace(p, os, parent_context, false);
2068 Context context(true, parent_context.textclass, newlayout,
2069 parent_context.layout, parent_context.font);
2070 if (parent_context.deeper_paragraph) {
2071 // We are beginning a nested environment after a
2072 // deeper paragraph inside the outer list environment.
2073 // Therefore we don't need to output a "begin deeper".
2074 context.need_end_deeper = true;
2076 parent_context.check_end_layout(os);
2077 if (last_env == name) {
2078 // we need to output a separator since LyX would export
2079 // the two environments as one otherwise (bug 5716)
2080 TeX2LyXDocClass const & textclass(parent_context.textclass);
2081 Context newcontext(true, textclass,
2082 &(textclass.defaultLayout()));
2083 newcontext.check_layout(os);
2084 begin_inset(os, "Separator plain\n");
2086 newcontext.check_end_layout(os);
2088 switch (context.layout->latextype) {
2089 case LATEX_LIST_ENVIRONMENT:
2090 context.add_par_extra_stuff("\\labelwidthstring "
2091 + p.verbatim_item() + '\n');
2094 case LATEX_BIB_ENVIRONMENT:
2095 p.verbatim_item(); // swallow next arg
2101 context.check_deeper(os);
2102 // handle known optional and required arguments
2103 if (context.layout->latextype == LATEX_ENVIRONMENT)
2104 output_arguments(os, p, outer, false, string(), context,
2105 context.layout->latexargs());
2106 else if (context.layout->latextype == LATEX_ITEM_ENVIRONMENT) {
2108 output_arguments(oss, p, outer, false, string(), context,
2109 context.layout->latexargs());
2110 context.list_extra_stuff = oss.str();
2112 parse_text(p, os, FLAG_END, outer, context);
2113 if (context.layout->latextype == LATEX_ENVIRONMENT)
2114 output_arguments(os, p, outer, false, "post", context,
2115 context.layout->postcommandargs());
2116 context.check_end_layout(os);
2117 if (parent_context.deeper_paragraph) {
2118 // We must suppress the "end deeper" because we
2119 // suppressed the "begin deeper" above.
2120 context.need_end_deeper = false;
2122 context.check_end_deeper(os);
2123 parent_context.new_paragraph(os);
2125 if (!preamble.titleLayoutFound())
2126 preamble.titleLayoutFound(newlayout->intitle);
2127 set<string> const & req = newlayout->requires();
2128 set<string>::const_iterator it = req.begin();
2129 set<string>::const_iterator en = req.end();
2130 for (; it != en; ++it)
2131 preamble.registerAutomaticallyLoadedPackage(*it);
2134 // The single '=' is meant here.
2135 else if ((newinsetlayout = findInsetLayout(parent_context.textclass, name, false))) {
2136 eat_whitespace(p, os, parent_context, false);
2137 parent_context.check_layout(os);
2138 begin_inset(os, "Flex ");
2139 os << to_utf8(newinsetlayout->name()) << '\n'
2140 << "status collapsed\n";
2141 if (newinsetlayout->isPassThru()) {
2142 string const arg = p.verbatimEnvironment(name);
2143 Context context(true, parent_context.textclass,
2144 &parent_context.textclass.plainLayout(),
2145 parent_context.layout);
2146 output_ert(os, arg, parent_context);
2148 parse_text_in_inset(p, os, FLAG_END, false, parent_context, newinsetlayout);
2152 else if (name == "appendix") {
2153 // This is no good latex style, but it works and is used in some documents...
2154 eat_whitespace(p, os, parent_context, false);
2155 parent_context.check_end_layout(os);
2156 Context context(true, parent_context.textclass, parent_context.layout,
2157 parent_context.layout, parent_context.font);
2158 context.check_layout(os);
2159 os << "\\start_of_appendix\n";
2160 parse_text(p, os, FLAG_END, outer, context);
2161 context.check_end_layout(os);
2165 else if (known_environments.find(name) != known_environments.end()) {
2166 vector<ArgumentType> arguments = known_environments[name];
2167 // The last "argument" denotes whether we may translate the
2168 // environment contents to LyX
2169 // The default required if no argument is given makes us
2170 // compatible with the reLyXre environment.
2171 ArgumentType contents = arguments.empty() ?
2174 if (!arguments.empty())
2175 arguments.pop_back();
2176 // See comment in parse_unknown_environment()
2177 bool const specialfont =
2178 (parent_context.font != parent_context.normalfont);
2179 bool const new_layout_allowed =
2180 parent_context.new_layout_allowed;
2182 parent_context.new_layout_allowed = false;
2183 parse_arguments("\\begin{" + name + "}", arguments, p, os,
2184 outer, parent_context);
2185 if (contents == verbatim)
2186 output_ert_inset(os, p.ertEnvironment(name),
2189 parse_text_snippet(p, os, FLAG_END, outer,
2191 output_ert_inset(os, "\\end{" + name + "}", parent_context);
2193 parent_context.new_layout_allowed = new_layout_allowed;
2197 parse_unknown_environment(p, name, os, FLAG_END, outer,
2201 active_environments.pop_back();
2205 /// parses a comment and outputs it to \p os.
// \param t the token that triggered the call; asserted to be catComment.
// A comment with content (t.cs() non-empty) is emitted via output_comment().
// A newline following the comment line either starts a new paragraph (when
// the context allows new layouts) or is preserved as ERT, so that the line
// break survives round-tripping.
// NOTE(review): the embedded numbering in this listing has gaps (2207, 2214,
// 2220, 2223-2224), so some statements/braces of this function are not
// shown here — confirm against the full source before relying on details.
2206 void parse_comment(Parser & p, ostream & os, Token const & t, Context & context)
2208 LASSERT(t.cat() == catComment, return);
// Comment with actual text: write it out as a comment.
2209 if (!t.cs().empty()) {
2210 context.check_layout(os);
2211 output_comment(p, os, t.cs(), context);
2212 if (p.next_token().cat() == catNewline) {
2213 // A newline after a comment line starts a new
2215 if (context.new_layout_allowed) {
2216 if(!context.atParagraphStart())
2217 // Only start a new paragraph if not already
2218 // done (we might get called recursively)
2219 context.new_paragraph(os);
// Cannot start a paragraph here: keep the line break as ERT instead.
2221 output_ert_inset(os, "\n", context);
// The new paragraph/ERT already separates things; swallow what follows.
2222 eat_whitespace(p, os, context, true);
2225 // "%\n" combination
2232 * Reads spaces and comments until the first non-space, non-comment token.
2233 * New paragraphs (double newlines or \\par) are handled like simple spaces
2234 * if \p eatParagraph is true.
2235 * Spaces are skipped, but comments are written to \p os.
// Comments are forwarded to parse_comment() so they are preserved in the
// output; everything else that is not whitespace terminates the scan.
// NOTE(review): the loop header, the putback of the terminating token and
// the closing braces are on lines elided from this listing (gaps at 2236,
// 2238-2240, 2246 ff.) — confirm against the full source.
2237 void eat_whitespace(Parser & p, ostream & os, Context & context,
2241 Token const & t = p.get_token();
2242 if (t.cat() == catComment)
2243 parse_comment(p, os, t, context);
// Stop on a paragraph break (unless we may eat those) or on any token
// that is neither a space nor a newline.
2244 else if ((! eatParagraph && p.isParagraph()) ||
2245 (t.cat() != catSpace && t.cat() != catNewline)) {
2254 * Set a font attribute, parse text and reset the font attribute.
2255 * \param attribute Attribute name (e.g. \\family, \\shape etc.)
2256 * \param currentvalue Current value of the attribute. Is set to the new
2257 * value during parsing.
2258 * \param newvalue New value of the attribute
// The previous value is restored afterwards, both in the output stream and
// in \p currentvalue, so the font change is strictly scoped to the parsed
// snippet.
2260 void parse_text_attributes(Parser & p, ostream & os, unsigned flags, bool outer,
2261 Context & context, string const & attribute,
2262 string & currentvalue, string const & newvalue)
2264 context.check_layout(os);
// Remember the old value so it can be restored below.
2265 string const oldvalue = currentvalue;
2266 currentvalue = newvalue;
// Emit the attribute change, e.g. "\family typewriter".
2267 os << '\n' << attribute << ' ' << newvalue << "\n";
2268 parse_text_snippet(p, os, flags, outer, context);
2269 context.check_layout(os);
// Emit the attribute reset and restore the tracked value.
2270 os << '\n' << attribute << ' ' << oldvalue << "\n";
2271 currentvalue = oldvalue;
2275 /// get the arguments of a natbib or jurabib citation command
// The first optional argument read is stored in \p after; only if it is
// present is a second one read into \p before. With \p natbibOrder and two
// non-empty arguments the two are swapped, because natbib expects the
// "before" text first. \p qualified switches the delimiters to '(' ')'
// (presumably for biblatex qualified citation lists — confirm with callers).
2276 void get_cite_arguments(Parser & p, bool natbibOrder,
2277 string & before, string & after, bool const qualified = false)
2279 // We need to distinguish "" and "[]", so we can't use p.getOpt().
2281 // text before the citation
2283 // text after the citation
2284 after = qualified ? p.getFullOpt(false, '(', ')') : p.getFullOpt();
2286 if (!after.empty()) {
2287 before = qualified ? p.getFullOpt(false, '(', ')') : p.getFullOpt();
2288 if (natbibOrder && !before.empty())
2289 swap(before, after);
// Copy \p src to \p dstname. A relative \p dstname is interpreted relative
// to the parent .lyx file's path. Missing target directories are created,
// and an existing destination file is only replaced when overwriteFiles()
// is set. All failures are reported as warnings on cerr; the conversion
// continues regardless.
2294 void copy_file(FileName const & src, string dstname)
2298 string const absParent = getParentFilePath(false);
2300 if (FileName::isAbsolute(dstname))
2301 dst = FileName(dstname);
2303 dst = makeAbsPath(dstname, absParent);
2304 FileName const srcpath = src.onlyPath();
2305 FileName const dstpath = dst.onlyPath();
// Nothing to copy if source and destination live in the same directory.
2306 if (equivalent(srcpath, dstpath))
2308 if (!dstpath.isDirectory()) {
2309 if (!dstpath.createPath()) {
2310 cerr << "Warning: Could not create directory for file `"
2311 << dst.absFileName() << "´." << endl;
// Destination exists: overwrite only if the user asked for it.
2315 if (dst.isReadableFile()) {
2316 if (overwriteFiles())
2317 cerr << "Warning: Overwriting existing file `"
2318 << dst.absFileName() << "´." << endl;
2320 cerr << "Warning: Not overwriting existing file `"
2321 << dst.absFileName() << "´." << endl;
2325 if (!src.copyTo(dst))
2326 cerr << "Warning: Could not copy file `" << src.absFileName()
2327 << "´ to `" << dst.absFileName() << "´." << endl;
2331 /// Parse a literate Chunk section. The initial "<<" is already parsed.
// Returns false when the text cannot be a chunk — no Flex:Chunk inset
// layout in the current text class, or no ">>=" chunk header is found —
// so that the caller can treat "<<" as ordinary text (quotes) instead.
// On success a "Flex Chunk" inset is written, with the chunk parameters
// in "Argument 1" and the verbatim chunk body as ERT.
// NOTE(review): several early-return/cleanup lines are elided from this
// listing (gaps at 2336-2340, 2344-2347, 2349-2352, 2355-2361, 2376-2377,
// 2379 ff.) — confirm details against the full source.
2332 bool parse_chunk(Parser & p, ostream & os, Context & context)
2334 // check whether a chunk is possible here.
2335 if (!context.textclass.hasInsetLayout(from_ascii("Flex:Chunk"))) {
2341 // read the parameters
// Everything up to (but not including) ">>=\n" is the parameter text.
2342 Parser::Arg const params = p.verbatimStuff(">>=\n", false);
2343 if (!params.first) {
// The chunk body runs verbatim up to the "\n@" terminator.
2348 Parser::Arg const code = p.verbatimStuff("\n@");
2353 string const post_chunk = p.verbatimStuff("\n").second + '\n';
// After the "@" terminator only whitespace may follow on the same line.
2354 if (post_chunk[0] != ' ' && post_chunk[0] != '\n') {
2358 // The last newline read is important for paragraph handling
2362 //cerr << "params=[" << params.second << "], code=[" << code.second << "]" <<endl;
2363 // We must have a valid layout before outputting the Chunk inset.
2364 context.check_layout(os);
2365 Context chunkcontext(true, context.textclass);
2366 chunkcontext.layout = &context.textclass.plainLayout();
2367 begin_inset(os, "Flex Chunk");
2368 os << "\nstatus open\n";
// Non-empty parameters go into the inset's first argument.
2369 if (!params.second.empty()) {
2370 chunkcontext.check_layout(os);
2371 Context paramscontext(true, context.textclass);
2372 paramscontext.layout = &context.textclass.plainLayout();
2373 begin_inset(os, "Argument 1");
2374 os << "\nstatus open\n";
2375 output_ert(os, params.second, paramscontext);
// The chunk body itself is emitted as ERT inside the inset.
2378 output_ert(os, code.second, chunkcontext);
2386 /// detects \\def, \\long\\def and \\global\\long\\def with ws and comments
// Pure lookahead: inspects up to two control sequences after the current
// token, skipping whitespace, newlines and comments in between. The tokens
// consumed while looking ahead are pushed back before returning (the
// putback loop at 2419 below; its body is on an elided line — confirm
// against the full source).
2387 bool is_macro(Parser & p)
2389 Token first = p.curr_token();
2390 if (first.cat() != catEscape || !p.good())
// Plain \def needs no further lookahead.
2392 if (first.cs() == "def")
// Only \global... and \long... prefixes can still lead to a \def.
2394 if (first.cs() != "global" && first.cs() != "long")
2396 Token second = p.get_token();
// Skip whitespace/comments between the prefix and the next token.
2398 while (p.good() && !p.isParagraph() && (second.cat() == catSpace ||
2399 second.cat() == catNewline || second.cat() == catComment)) {
2400 second = p.get_token();
2403 bool secondvalid = second.cat() == catEscape;
2405 bool thirdvalid = false;
// \global\long needs a third control sequence (the \def itself).
2406 if (p.good() && first.cs() == "global" && secondvalid &&
2407 second.cs() == "long") {
2408 third = p.get_token();
2410 while (p.good() && !p.isParagraph() &&
2411 (third.cat() == catSpace ||
2412 third.cat() == catNewline ||
2413 third.cat() == catComment)) {
2414 third = p.get_token();
2417 thirdvalid = third.cat() == catEscape;
// Undo the lookahead so the caller sees an unmodified token stream.
2419 for (int i = 0; i < pos; ++i)
// Two-token case: \global\def or \long\def.
2424 return (first.cs() == "global" || first.cs() == "long") &&
2425 second.cs() == "def";
// Three-token case: \global\long\def.
2426 return first.cs() == "global" && second.cs() == "long" &&
2427 third.cs() == "def";
2431 /// Parse a macro definition (assumes that is_macro() returned true)
// Reads the (possibly prefixed) \def, the macro name and its parameter
// text. A macro whose parameter text is the simple "#1#2..." form is
// exported as a LyX FormulaMacro; anything more complicated (delimited
// parameters etc.) is kept verbatim as ERT. Some branches and the
// closing code are on lines elided from this listing (gaps at e.g.
// 2436-2437, 2454-2458, 2461-2464, 2488-2489) — confirm against the
// full source.
2432 void parse_macro(Parser & p, ostream & os, Context & context)
2434 context.check_layout(os);
2435 Token first = p.curr_token();
// Collect the full prefix chain (\global, \long) in front of \def.
2438 string command = first.asInput();
2439 if (first.cs() != "def") {
2441 eat_whitespace(p, os, context, false);
2442 second = p.curr_token();
2443 command += second.asInput();
2444 if (second.cs() != "def") {
2446 eat_whitespace(p, os, context, false);
2447 third = p.curr_token();
2448 command += third.asInput();
2451 eat_whitespace(p, os, context, false);
// The control sequence being defined.
2452 string const name = p.get_token().cs();
2453 eat_whitespace(p, os, context, false);
// Scan the parameter text up to the opening brace of the body.
2459 while (p.next_token().cat() != catBegin) {
2460 if (p.next_token().cat() == catParameter) {
2465 // followed by number?
2466 if (p.next_token().cat() == catOther) {
2467 string s = p.get_token().asInput();
2469 // number = current arity + 1?
// Parameters must be numbered consecutively (#1, #2, ...) to stay
// compatible with LyX math macros.
2470 if (s.size() == 1 && s[0] == arity + '0' + 1)
2475 paramtext += p.get_token().cs();
2477 paramtext += p.get_token().cs();
2482 // only output simple (i.e. compatible) macro as FormulaMacros
2483 string ert = '\\' + name + ' ' + paramtext + '{' + p.verbatim_item() + '}';
2485 context.check_layout(os);
2486 begin_inset(os, "FormulaMacro");
2487 os << "\n\\def" << ert;
// Incompatible macro: keep the original definition verbatim as ERT.
2490 output_ert_inset(os, command + ert, context);
// Register the LaTeX packages required by the external template \p name
// (e.g. "GnumericSpreadsheet") as automatically loaded, so that the
// generated preamble does not load them again. Prefers the template's
// PDFLaTeX format and falls back to its LaTeX format; does nothing if
// neither exists (or the template is unknown — the early return for a
// null \c et is on an elided line, see gap at 2498-2499).
2494 void registerExternalTemplatePackages(string const & name)
2496 external::TemplateManager const & etm = external::TemplateManager::get();
2497 external::Template const * const et = etm.getTemplateByName(name);
2500 external::Template::Formats::const_iterator cit = et->formats.end();
2502 cit = et->formats.find("PDFLaTeX");
2503 if (cit == et->formats.end())
2504 // If the template has not specified a PDFLaTeX output,
2505 // we try the LaTeX format.
2506 cit = et->formats.find("LaTeX");
2507 if (cit == et->formats.end())
// Register every package the chosen format requires.
2509 vector<string>::const_iterator qit = cit->second.requirements.begin();
2510 vector<string>::const_iterator qend = cit->second.requirements.end();
2511 for (; qit != qend; ++qit)
2512 preamble.registerAutomaticallyLoadedPackage(*qit);
2515 } // anonymous namespace
2519 * Find a file with basename \p name in path \p path and an extension
// from the null-terminated list \p extensions. Tries each extension in
// order and tests for existence relative to \p path. The return
// statements are on elided lines — presumably the first existing trial
// name is returned, and an empty string if none exists; confirm against
// the full source.
2522 string find_file(string const & name, string const & path,
2523 char const * const * extensions)
2525 for (char const * const * what = extensions; *what; ++what) {
2526 string const trial = addExtension(name, *what);
2527 if (makeAbsPath(trial, path).exists())
2534 /// Convert filenames with TeX macros and/or quotes to something LyX
// can understand: expands \lyxdot, \space and \string escapes token by
// token, then strips the surrounding double quotes that latex_path()
// may have added. Quotes may enclose either the whole name or only the
// basename (before the extension), so both cases are handled below.
// NOTE(review): the parser setup and several branch bodies are on lines
// elided from this listing (gaps at 2535, 2537-2540, 2543, 2546-2548,
// 2550-2551, 2556-2559, 2566-2567, 2570-2571) — confirm against the
// full source.
2536 string const normalize_filename(string const & name)
2541 Token const & t = p.get_token();
// Ordinary tokens are passed through unchanged.
2542 if (t.cat() != catEscape)
2544 else if (t.cs() == "lyxdot") {
2545 // This is used by LyX for simple dots in relative
2549 } else if (t.cs() == "space") {
2552 } else if (t.cs() == "string") {
2553 // Convert \string" to " and \string~ to ~
// Any other \string<token> is kept verbatim.
2554 Token const & n = p.next_token();
2555 if (n.asInput() != "\"" && n.asInput() != "~")
2560 // Strip quotes. This is a bit complicated (see latex_path()).
2561 string full = os.str();
2562 if (!full.empty() && full[0] == '"') {
// Quotes may end before the extension, e.g. "file name".ext.
2563 string base = removeExtension(full);
2564 string ext = getExtension(full);
2565 if (!base.empty() && base[base.length()-1] == '"')
2568 return addExtension(trim(base, "\""), ext);
// Quotes around the complete name, e.g. "file name.ext".
2569 if (full[full.length()-1] == '"')
2572 return trim(full, "\"");
2578 /// Convert \p name from TeX convention (relative to master file) to LyX
2579 /// convention (relative to .lyx file) if it is relative
// Takes the copy-files mode into account: when files are copied, the
// path must additionally stay creatable next to the master .lyx file
// (no "/../" escapes). Several branch boundaries are on lines elided
// from this listing (gaps at 2581, 2585, 2589-2590, 2604-2609,
// 2614-2616, 2620 ff.) — confirm against the full source.
2580 void fix_child_filename(string & name)
2582 string const absMasterTeX = getMasterFilePath(true);
2583 bool const isabs = FileName::isAbsolute(name);
2584 // convert from "relative to .tex master" to absolute original path
2586 name = makeAbsPath(name, absMasterTeX).absFileName();
2587 bool copyfile = copyFiles();
2588 string const absParentLyX = getParentFilePath(false);
2591 // convert from absolute original path to "relative to master file"
2592 string const rel = to_utf8(makeRelPath(from_utf8(name),
2593 from_utf8(absMasterTeX)));
2594 // re-interpret "relative to .tex file" as "relative to .lyx file"
2595 // (is different if the master .lyx file resides in a
2596 // different path than the master .tex file)
2597 string const absMasterLyX = getMasterFilePath(false);
2598 abs = makeAbsPath(rel, absMasterLyX).absFileName();
2599 // Do not copy if the new path is impossible to create. Example:
2600 // absMasterTeX = "/foo/bar/"
2601 // absMasterLyX = "/bar/"
2602 // name = "/baz.eps" => new absolute name would be "/../baz.eps"
2603 if (contains(name, "/../"))
2610 // convert from absolute original path to
2611 // "relative to .lyx file"
2612 name = to_utf8(makeRelPath(from_utf8(abs),
2613 from_utf8(absParentLyX)));
2617 // convert from absolute original path to "relative to .lyx file"
2618 name = to_utf8(makeRelPath(from_utf8(name),
2619 from_utf8(absParentLyX)));
2624 void parse_text(Parser & p, ostream & os, unsigned flags, bool outer,
2625 Context & context, string const rdelim)
2627 Layout const * newlayout = 0;
2628 InsetLayout const * newinsetlayout = 0;
2629 char const * const * where = 0;
2630 // Store the latest bibliographystyle, addcontentslineContent and
2631 // nocite{*} option (needed for bibtex inset)
2633 string contentslineContent;
2634 string bibliographystyle = "default";
2635 bool const use_natbib = isProvided("natbib");
2636 bool const use_jurabib = isProvided("jurabib");
2637 bool const use_biblatex = isProvided("biblatex")
2638 && preamble.citeEngine() != "biblatex-natbib";
2639 bool const use_biblatex_natbib = isProvided("biblatex-natbib")
2640 || (isProvided("biblatex") && preamble.citeEngine() == "biblatex-natbib");
2641 need_commentbib = use_biblatex || use_biblatex_natbib;
2644 // it is impossible to determine the correct encoding for non-CJK Japanese.
2645 // Therefore write a note at the beginning of the document
2646 if (is_nonCJKJapanese) {
2647 context.check_layout(os);
2648 begin_inset(os, "Note Note\n");
2649 os << "status open\n\\begin_layout Plain Layout\n"
2650 << "\\series bold\n"
2651 << "Important information:\n"
2652 << "\\end_layout\n\n"
2653 << "\\begin_layout Plain Layout\n"
2654 << "The original LaTeX source for this document is in Japanese (pLaTeX).\n"
2655 << " It was therefore impossible for tex2lyx to determine the correct encoding.\n"
2656 << " The iconv encoding " << p.getEncoding() << " was used.\n"
2657 << " If this is incorrect, you must run the tex2lyx program on the command line\n"
2658 << " and specify the encoding using the -e command-line switch.\n"
2659 << " In addition, you might want to double check that the desired output encoding\n"
2660 << " is correctly selected in Document > Settings > Language.\n"
2661 << "\\end_layout\n";
2663 is_nonCJKJapanese = false;
2666 bool have_cycled = false;
2668 // Leave here only after at least one cycle
2669 if (have_cycled && flags & FLAG_LEAVE) {
2670 flags &= ~FLAG_LEAVE;
2674 Token const & t = p.get_token();
2676 debugToken(cerr, t, flags);
2679 if (flags & FLAG_ITEM) {
2680 if (t.cat() == catSpace)
2683 flags &= ~FLAG_ITEM;
2684 if (t.cat() == catBegin) {
2685 // skip the brace and collect everything to the next matching
2687 flags |= FLAG_BRACE_LAST;
2691 // handle only this single token, leave the loop if done
2692 flags |= FLAG_LEAVE;
2695 if (t.cat() != catEscape && t.character() == ']' &&
2696 (flags & FLAG_BRACK_LAST))
2698 if (t.cat() == catEnd && (flags & FLAG_BRACE_LAST))
2700 string tok = t.asInput();
2701 // we only support delimiters with max 2 chars for now.
2702 if (rdelim.size() > 1)
2703 tok += p.next_token().asInput();
2704 if (t.cat() != catEscape && !rdelim.empty()
2705 && tok == rdelim && (flags & FLAG_RDELIM)) {
2706 if (rdelim.size() > 1)
2707 p.get_token(); // eat rdelim
2711 // If there is anything between \end{env} and \begin{env} we
2712 // don't need to output a separator.
2713 if (t.cat() != catSpace && t.cat() != catNewline &&
2714 t.asInput() != "\\begin")
2721 bool const starred = p.next_token().asInput() == "*";
2722 string const starredname(starred ? (t.cs() + '*') : t.cs());
2723 if (t.cat() == catMath) {
2724 // we are inside some text mode thingy, so opening new math is allowed
2725 context.check_layout(os);
2726 begin_inset(os, "Formula ");
2727 Token const & n = p.get_token();
2728 bool const display(n.cat() == catMath && outer);
2730 // TeX's $$...$$ syntax for displayed math
2732 parse_math(p, os, FLAG_SIMPLE, MATH_MODE);
2734 p.get_token(); // skip the second '$' token
2736 // simple $...$ stuff
2739 parse_math(p, os, FLAG_SIMPLE, MATH_MODE);
2744 // Prevent the conversion of a line break to a
2745 // space (bug 7668). This does not change the
2746 // output, but looks ugly in LyX.
2747 eat_whitespace(p, os, context, false);
2752 if (t.cat() == catSuper || t.cat() == catSub) {
2753 cerr << "catcode " << t << " illegal in text mode\n";
2757 // Basic support for quotes. We try to disambiguate
2758 // quotes from the context (e.g., a left english quote is
2759 // the same as a right german quote...).
2760 // Try to make a smart guess about the side
2761 Token const prev = p.prev_token();
2762 bool const opening = (prev.cat() != catSpace && prev.character() != 0
2763 && prev.character() != '\n' && prev.character() != '~');
2764 if (t.asInput() == "`" && p.next_token().asInput() == "`") {
2765 context.check_layout(os);
2766 begin_inset(os, "Quotes ");
2767 os << guessQuoteStyle("eld", opening);
2773 if (t.asInput() == "'" && p.next_token().asInput() == "'") {
2774 context.check_layout(os);
2775 begin_inset(os, "Quotes ");
2776 os << guessQuoteStyle("erd", opening);
2783 if (t.asInput() == ">" && p.next_token().asInput() == ">") {
2784 context.check_layout(os);
2785 begin_inset(os, "Quotes ");
2786 os << guessQuoteStyle("ald", opening);
2793 if (t.asInput() == "<"
2794 && p.next_token().asInput() == "<") {
2795 bool has_chunk = false;
2799 has_chunk = parse_chunk(p, os, context);
2805 context.check_layout(os);
2806 begin_inset(os, "Quotes ");
2807 os << guessQuoteStyle("ard", opening);
2815 if (t.cat() == catSpace || (t.cat() == catNewline && ! p.isParagraph())) {
2816 check_space(p, os, context);
2820 // babel shorthands (also used by polyglossia)
2821 // Since these can have different meanings for different languages
2822 // we import them as ERT (but they must be put in ERT to get output
2824 if (t.asInput() == "\"") {
2826 // These are known pairs. We put them together in
2827 // one ERT inset. In other cases (such as "a), only
2828 // the quotation mark is ERTed.
2829 if (p.next_token().asInput() == "\""
2830 || p.next_token().asInput() == "|"
2831 || p.next_token().asInput() == "-"
2832 || p.next_token().asInput() == "~"
2833 || p.next_token().asInput() == "="
2834 || p.next_token().asInput() == "/"
2835 || p.next_token().asInput() == "~"
2836 || p.next_token().asInput() == "'"
2837 || p.next_token().asInput() == "`"
2838 || p.next_token().asInput() == "<"
2839 || p.next_token().asInput() == ">") {
2840 s += p.next_token().asInput();
2843 output_ert_inset(os, s, context);
2847 if (t.character() == '[' && noweb_mode &&
2848 p.next_token().character() == '[') {
2849 // These can contain underscores
2851 string const s = p.getFullOpt() + ']';
2852 if (p.next_token().character() == ']')
2855 cerr << "Warning: Inserting missing ']' in '"
2856 << s << "'." << endl;
2857 output_ert_inset(os, s, context);
2861 if (t.cat() == catLetter) {
2862 context.check_layout(os);
2867 if (t.cat() == catOther ||
2868 t.cat() == catAlign ||
2869 t.cat() == catParameter) {
2870 context.check_layout(os);
2871 if (t.asInput() == "-" && p.next_token().asInput() == "-" &&
2872 context.merging_hyphens_allowed &&
2873 context.font.family != "ttfamily" &&
2874 !context.layout->pass_thru) {
2875 if (p.next_next_token().asInput() == "-") {
2877 os << to_utf8(docstring(1, 0x2014));
2881 os << to_utf8(docstring(1, 0x2013));
2884 // This translates "&" to "\\&" which may be wrong...
2889 if (p.isParagraph()) {
2890 // In minted floating listings we will collect
2891 // everything into the caption, where multiple
2892 // paragraphs are forbidden.
2893 if (minted_float.empty()) {
2894 if (context.new_layout_allowed)
2895 context.new_paragraph(os);
2897 output_ert_inset(os, "\\par ", context);
2900 eat_whitespace(p, os, context, true);
2904 if (t.cat() == catActive) {
2905 context.check_layout(os);
2906 if (t.character() == '~') {
2907 if (context.layout->free_spacing)
2910 begin_inset(os, "space ~\n");
2918 if (t.cat() == catBegin) {
2919 Token const next = p.next_token();
2920 Token const end = p.next_next_token();
2921 if (next.cat() == catEnd) {
2923 Token const prev = p.prev_token();
2925 if (p.next_token().character() == '`')
2926 ; // ignore it in {}``
2928 output_ert_inset(os, "{}", context);
2929 } else if (next.cat() == catEscape &&
2930 is_known(next.cs(), known_quotes) &&
2931 end.cat() == catEnd) {
2932 // Something like {\textquoteright} (e.g.
2933 // from writer2latex). We may skip the
2934 // braces here for better readability.
2935 parse_text_snippet(p, os, FLAG_BRACE_LAST,
2937 } else if (p.next_token().asInput() == "\\ascii") {
2938 // handle the \ascii characters
2939 // (the case without braces is handled later)
2940 // the code is "{\ascii\xxx}"
2941 p.get_token(); // eat \ascii
2942 string name2 = p.get_token().asInput();
2943 p.get_token(); // eat the final '}'
2944 string const name = "{\\ascii" + name2 + "}";
2948 // get the character from unicodesymbols
2949 docstring s = encodings.fromLaTeXCommand(from_utf8(name),
2950 Encodings::TEXT_CMD, termination, rem, &req);
2952 context.check_layout(os);
2955 output_ert_inset(os,
2956 to_utf8(rem), context);
2957 for (set<string>::const_iterator it = req.begin();
2958 it != req.end(); ++it)
2959 preamble.registerAutomaticallyLoadedPackage(*it);
2961 // we did not find a non-ert version
2962 output_ert_inset(os, name, context);
2964 context.check_layout(os);
2965 // special handling of font attribute changes
2966 Token const prev = p.prev_token();
2967 TeXFont const oldFont = context.font;
2968 if (next.character() == '[' ||
2969 next.character() == ']' ||
2970 next.character() == '*') {
2972 if (p.next_token().cat() == catEnd) {
2977 output_ert_inset(os, "{", context);
2978 parse_text_snippet(p, os,
2981 output_ert_inset(os, "}", context);
2983 } else if (! context.new_layout_allowed) {
2984 output_ert_inset(os, "{", context);
2985 parse_text_snippet(p, os, FLAG_BRACE_LAST,
2987 output_ert_inset(os, "}", context);
2988 } else if (is_known(next.cs(), known_sizes)) {
2989 // next will change the size, so we must
2991 parse_text_snippet(p, os, FLAG_BRACE_LAST,
2993 if (!context.atParagraphStart())
2995 << context.font.size << "\n";
2996 } else if (is_known(next.cs(), known_font_families)) {
2997 // next will change the font family, so we
2998 // must reset it here
2999 parse_text_snippet(p, os, FLAG_BRACE_LAST,
3001 if (!context.atParagraphStart())
3003 << context.font.family << "\n";
3004 } else if (is_known(next.cs(), known_font_series)) {
3005 // next will change the font series, so we
3006 // must reset it here
3007 parse_text_snippet(p, os, FLAG_BRACE_LAST,
3009 if (!context.atParagraphStart())
3011 << context.font.series << "\n";
3012 } else if (is_known(next.cs(), known_font_shapes)) {
3013 // next will change the font shape, so we
3014 // must reset it here
3015 parse_text_snippet(p, os, FLAG_BRACE_LAST,
3017 if (!context.atParagraphStart())
3019 << context.font.shape << "\n";
3020 } else if (is_known(next.cs(), known_old_font_families) ||
3021 is_known(next.cs(), known_old_font_series) ||
3022 is_known(next.cs(), known_old_font_shapes)) {
3023 // next will change the font family, series
3024 // and shape, so we must reset it here
3025 parse_text_snippet(p, os, FLAG_BRACE_LAST,
3027 if (!context.atParagraphStart())
3029 << context.font.family
3031 << context.font.series
3033 << context.font.shape << "\n";
3035 output_ert_inset(os, "{", context);
3036 parse_text_snippet(p, os, FLAG_BRACE_LAST,
3038 output_ert_inset(os, "}", context);
3044 if (t.cat() == catEnd) {
3045 if (flags & FLAG_BRACE_LAST) {
3048 cerr << "stray '}' in text\n";
3049 output_ert_inset(os, "}", context);
3053 if (t.cat() == catComment) {
3054 parse_comment(p, os, t, context);
3059 // control sequences
3062 if (t.cs() == "(" || t.cs() == "[") {
3063 bool const simple = t.cs() == "(";
3064 context.check_layout(os);
3065 begin_inset(os, "Formula");
3066 os << " \\" << t.cs();
3067 parse_math(p, os, simple ? FLAG_SIMPLE2 : FLAG_EQUATION, MATH_MODE);
3068 os << '\\' << (simple ? ')' : ']');
3071 // Prevent the conversion of a line break to a
3072 // space (bug 7668). This does not change the
3073 // output, but looks ugly in LyX.
3074 eat_whitespace(p, os, context, false);
3079 if (t.cs() == "begin") {
3080 parse_environment(p, os, outer, last_env,
3085 if (t.cs() == "end") {
3086 if (flags & FLAG_END) {
3087 // eat environment name
3088 string const name = p.getArg('{', '}');
3089 if (name != active_environment())
3090 cerr << "\\end{" + name + "} does not match \\begin{"
3091 + active_environment() + "}\n";
3094 p.error("found 'end' unexpectedly");
3098 // "item" by default, but could be something else
3099 if (t.cs() == context.layout->itemcommand()) {
3101 if (context.layout->labeltype == LABEL_MANUAL) {
3102 // FIXME: This swallows comments, but we cannot use
3103 // eat_whitespace() since we must not output
3104 // anything before the item.
3105 p.skip_spaces(true);
3106 s = p.verbatimOption();
3108 p.skip_spaces(false);
3110 context.check_layout(os);
3111 if (context.has_item) {
3112 // An item in an unknown list-like environment
3113 // FIXME: Do this in check_layout()!
3114 context.has_item = false;
3115 string item = "\\" + context.layout->itemcommand();
3118 output_ert_inset(os, item, context);
3120 if (context.layout->labeltype != LABEL_MANUAL)
3121 output_arguments(os, p, outer, false, "item", context,
3122 context.layout->itemargs());
3123 if (!context.list_extra_stuff.empty()) {
3124 os << context.list_extra_stuff;
3125 context.list_extra_stuff.clear();
3127 else if (!s.empty()) {
3128 // LyX adds braces around the argument,
3129 // so we need to remove them here.
3130 if (s.size() > 2 && s[0] == '{' &&
3131 s[s.size()-1] == '}')
3132 s = s.substr(1, s.size()-2);
3133 // If the argument contains a space we
3134 // must put it into ERT: Otherwise LyX
3135 // would misinterpret the space as
3136 // item delimiter (bug 7663)
3137 if (contains(s, ' ')) {
3138 output_ert_inset(os, s, context);
3141 os << parse_text_snippet(p2,
3142 FLAG_BRACK_LAST, outer, context);
3144 // The space is needed to separate the
3145 // item from the rest of the sentence.
3147 eat_whitespace(p, os, context, false);
3152 if (t.cs() == "bibitem") {
3154 context.check_layout(os);
3155 eat_whitespace(p, os, context, false);
3156 string label = p.verbatimOption();
3157 pair<bool, string> lbl = convert_latexed_command_inset_arg(label);
3158 bool const literal = !lbl.first;
3159 label = literal ? subst(label, "\n", " ") : lbl.second;
3160 string lit = literal ? "\"true\"" : "\"false\"";
3161 string key = convert_literate_command_inset_arg(p.verbatim_item());
3162 begin_command_inset(os, "bibitem", "bibitem");
3163 os << "label \"" << label << "\"\n"
3164 << "key \"" << key << "\"\n"
3165 << "literal " << lit << "\n";
3171 // catch the case of \def\inputGnumericTable
3173 if (t.cs() == "def") {
3174 Token second = p.next_token();
3175 if (second.cs() == "inputGnumericTable") {
3179 Token third = p.get_token();
3181 if (third.cs() == "input") {
3185 string name = normalize_filename(p.verbatim_item());
3186 string const path = getMasterFilePath(true);
3187 // We want to preserve relative / absolute filenames,
3188 // therefore path is only used for testing
3189 // The file extension is in every case ".tex".
3190 // So we need to remove this extension and check for
3191 // the original one.
3192 name = removeExtension(name);
3193 if (!makeAbsPath(name, path).exists()) {
3194 char const * const Gnumeric_formats[] = {"gnumeric",
3196 string const Gnumeric_name =
3197 find_file(name, path, Gnumeric_formats);
3198 if (!Gnumeric_name.empty())
3199 name = Gnumeric_name;
3201 FileName const absname = makeAbsPath(name, path);
3202 if (absname.exists()) {
3203 fix_child_filename(name);
3204 copy_file(absname, name);
3206 cerr << "Warning: Could not find file '"
3207 << name << "'." << endl;
3208 context.check_layout(os);
3209 begin_inset(os, "External\n\ttemplate ");
3210 os << "GnumericSpreadsheet\n\tfilename "
3213 context.check_layout(os);
3215 // register the packages that are automatically loaded
3216 // by the Gnumeric template
3217 registerExternalTemplatePackages("GnumericSpreadsheet");
3222 parse_macro(p, os, context);
3226 if (t.cs() == "noindent") {
3228 context.add_par_extra_stuff("\\noindent\n");
3232 if (t.cs() == "appendix") {
3233 context.add_par_extra_stuff("\\start_of_appendix\n");
3234 // We need to start a new paragraph. Otherwise the
3235 // appendix in 'bla\appendix\chapter{' would start
3237 context.new_paragraph(os);
3238 // We need to make sure that the paragraph is
3239 // generated even if it is empty. Otherwise the
3240 // appendix in '\par\appendix\par\chapter{' would
3242 context.check_layout(os);
3243 // FIXME: This is a hack to prevent paragraph
3244 // deletion if it is empty. Handle this better!
3245 output_comment(p, os,
3246 "dummy comment inserted by tex2lyx to "
3247 "ensure that this paragraph is not empty",
3249 // Both measures above may generate an additional
3250 // empty paragraph, but that does not hurt, because
3251 // whitespace does not matter here.
3252 eat_whitespace(p, os, context, true);
3256 // Must catch empty dates before findLayout is called below
3257 if (t.cs() == "date") {
3258 eat_whitespace(p, os, context, false);
3260 string const date = p.verbatim_item();
3263 preamble.suppressDate(true);
3266 preamble.suppressDate(false);
3267 if (context.new_layout_allowed &&
3268 (newlayout = findLayout(context.textclass,
3271 output_command_layout(os, p, outer,
3272 context, newlayout);
3273 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
3274 if (!preamble.titleLayoutFound())
3275 preamble.titleLayoutFound(newlayout->intitle);
3276 set<string> const & req = newlayout->requires();
3277 set<string>::const_iterator it = req.begin();
3278 set<string>::const_iterator en = req.end();
3279 for (; it != en; ++it)
3280 preamble.registerAutomaticallyLoadedPackage(*it);
3282 output_ert_inset(os,
3283 "\\date{" + p.verbatim_item() + '}',
3289 // Before we look for the layout name with star and alone below, we check the layouts including
3290 // the LateXParam, which might be one or several options or a star.
3291 // The single '=' is meant here.
3292 if (context.new_layout_allowed &&
3293 (newlayout = findLayout(context.textclass, t.cs(), true, p.getCommandLatexParam()))) {
3294 // store the latexparam here. This is eaten in output_command_layout
3295 context.latexparam = newlayout->latexparam();
3297 output_command_layout(os, p, outer, context, newlayout);
3298 context.latexparam.clear();
3300 if (!preamble.titleLayoutFound())
3301 preamble.titleLayoutFound(newlayout->intitle);
3302 set<string> const & req = newlayout->requires();
3303 for (set<string>::const_iterator it = req.begin(); it != req.end(); ++it)
3304 preamble.registerAutomaticallyLoadedPackage(*it);
3309 // Starred section headings
3310 // Must attempt to parse "Section*" before "Section".
3311 if ((p.next_token().asInput() == "*") &&
3312 context.new_layout_allowed &&
3313 (newlayout = findLayout(context.textclass, t.cs() + '*', true))) {
3316 output_command_layout(os, p, outer, context, newlayout);
3318 if (!preamble.titleLayoutFound())
3319 preamble.titleLayoutFound(newlayout->intitle);
3320 set<string> const & req = newlayout->requires();
3321 for (set<string>::const_iterator it = req.begin(); it != req.end(); ++it)
3322 preamble.registerAutomaticallyLoadedPackage(*it);
3326 // Section headings and the like
3327 if (context.new_layout_allowed &&
3328 (newlayout = findLayout(context.textclass, t.cs(), true))) {
3330 output_command_layout(os, p, outer, context, newlayout);
3332 if (!preamble.titleLayoutFound())
3333 preamble.titleLayoutFound(newlayout->intitle);
3334 set<string> const & req = newlayout->requires();
3335 for (set<string>::const_iterator it = req.begin(); it != req.end(); ++it)
3336 preamble.registerAutomaticallyLoadedPackage(*it);
3340 if (t.cs() == "subfloat") {
3341 // the syntax is \subfloat[list entry][sub caption]{content}
3342 // if it is a table of figure depends on the surrounding float
3344 // do nothing if there is no outer float
3345 if (!float_type.empty()) {
3346 context.check_layout(os);
3348 begin_inset(os, "Float " + float_type + "\n");
3350 << "\nsideways false"
3351 << "\nstatus collapsed\n\n";
3354 bool has_caption = false;
3355 if (p.next_token().cat() != catEscape &&
3356 p.next_token().character() == '[') {
3357 p.get_token(); // eat '['
3358 caption = parse_text_snippet(p, FLAG_BRACK_LAST, outer, context);
3361 // In case we have two optional args, the second is the caption.
3362 if (p.next_token().cat() != catEscape &&
3363 p.next_token().character() == '[') {
3364 p.get_token(); // eat '['
3365 caption = parse_text_snippet(p, FLAG_BRACK_LAST, outer, context);
3368 parse_text_in_inset(p, os, FLAG_ITEM, outer, context);
3369 // the caption comes always as the last
3371 // we must make sure that the caption gets a \begin_layout
3372 os << "\n\\begin_layout Plain Layout";
3374 begin_inset(os, "Caption Standard\n");
3375 Context newcontext(true, context.textclass,
3376 0, 0, context.font);
3377 newcontext.check_layout(os);
3378 os << caption << "\n";
3379 newcontext.check_end_layout(os);
3382 // close the layout we opened
3383 os << "\n\\end_layout";
3388 // if the float type is not supported or there is no surrounding float
3393 opt_arg1 = convert_literate_command_inset_arg(p.getFullOpt());
3395 opt_arg2 = convert_literate_command_inset_arg(p.getFullOpt());
3397 output_ert_inset(os, t.asInput() + opt_arg1 + opt_arg2
3398 + "{" + p.verbatim_item() + '}', context);
3403 if (t.cs() == "includegraphics") {
3404 bool const clip = p.next_token().asInput() == "*";
3407 string const arg = p.getArg('[', ']');
3408 map<string, string> opts;
3409 vector<string> keys;
3410 split_map(arg, opts, keys);
3412 opts["clip"] = string();
3413 string name = normalize_filename(p.verbatim_item());
3415 string const path = getMasterFilePath(true);
3416 // We want to preserve relative / absolute filenames,
3417 // therefore path is only used for testing
3418 if (!makeAbsPath(name, path).exists()) {
3419 // The file extension is probably missing.
3420 // Now try to find it out.
3421 string const dvips_name =
3422 find_file(name, path,
3423 known_dvips_graphics_formats);
3424 string const pdftex_name =
3425 find_file(name, path,
3426 known_pdftex_graphics_formats);
3427 if (!dvips_name.empty()) {
3428 if (!pdftex_name.empty()) {
3429 cerr << "This file contains the "
3431 "\"\\includegraphics{"
3433 "However, files\n\""
3434 << dvips_name << "\" and\n\""
3435 << pdftex_name << "\"\n"
3436 "both exist, so I had to make a "
3437 "choice and took the first one.\n"
3438 "Please move the unwanted one "
3439 "someplace else and try again\n"
3440 "if my choice was wrong."
3444 } else if (!pdftex_name.empty()) {
3450 FileName const absname = makeAbsPath(name, path);
3451 if (absname.exists()) {
3452 fix_child_filename(name);
3453 copy_file(absname, name);
3455 cerr << "Warning: Could not find graphics file '"
3456 << name << "'." << endl;
3458 context.check_layout(os);
3459 begin_inset(os, "Graphics ");
3460 os << "\n\tfilename " << name << '\n';
3461 if (opts.find("width") != opts.end())
3463 << translate_len(opts["width"]) << '\n';
3464 if (opts.find("height") != opts.end())
3466 << translate_len(opts["height"]) << '\n';
3467 if (opts.find("scale") != opts.end()) {
3468 istringstream iss(opts["scale"]);
3472 os << "\tscale " << val << '\n';
3474 if (opts.find("angle") != opts.end()) {
3475 os << "\trotateAngle "
3476 << opts["angle"] << '\n';
3477 vector<string>::const_iterator a =
3478 find(keys.begin(), keys.end(), "angle");
3479 vector<string>::const_iterator s =
3480 find(keys.begin(), keys.end(), "width");
3481 if (s == keys.end())
3482 s = find(keys.begin(), keys.end(), "height");
3483 if (s == keys.end())
3484 s = find(keys.begin(), keys.end(), "scale");
3485 if (s != keys.end() && distance(s, a) > 0)
3486 os << "\tscaleBeforeRotation\n";
3488 if (opts.find("origin") != opts.end()) {
3490 string const opt = opts["origin"];
3491 if (opt.find('l') != string::npos) ss << "left";
3492 if (opt.find('r') != string::npos) ss << "right";
3493 if (opt.find('c') != string::npos) ss << "center";
3494 if (opt.find('t') != string::npos) ss << "Top";
3495 if (opt.find('b') != string::npos) ss << "Bottom";
3496 if (opt.find('B') != string::npos) ss << "Baseline";
3497 if (!ss.str().empty())
3498 os << "\trotateOrigin " << ss.str() << '\n';
3500 cerr << "Warning: Ignoring unknown includegraphics origin argument '" << opt << "'\n";
3502 if (opts.find("keepaspectratio") != opts.end())
3503 os << "\tkeepAspectRatio\n";
3504 if (opts.find("clip") != opts.end())
3506 if (opts.find("draft") != opts.end())
3508 if (opts.find("bb") != opts.end())
3509 os << "\tBoundingBox "
3510 << opts["bb"] << '\n';
3511 int numberOfbbOptions = 0;
3512 if (opts.find("bbllx") != opts.end())
3513 numberOfbbOptions++;
3514 if (opts.find("bblly") != opts.end())
3515 numberOfbbOptions++;
3516 if (opts.find("bburx") != opts.end())
3517 numberOfbbOptions++;
3518 if (opts.find("bbury") != opts.end())
3519 numberOfbbOptions++;
3520 if (numberOfbbOptions == 4)
3521 os << "\tBoundingBox "
3522 << opts["bbllx"] << " " << opts["bblly"] << " "
3523 << opts["bburx"] << " " << opts["bbury"] << '\n';
3524 else if (numberOfbbOptions > 0)
3525 cerr << "Warning: Ignoring incomplete includegraphics boundingbox arguments.\n";
3526 numberOfbbOptions = 0;
3527 if (opts.find("natwidth") != opts.end())
3528 numberOfbbOptions++;
3529 if (opts.find("natheight") != opts.end())
3530 numberOfbbOptions++;
3531 if (numberOfbbOptions == 2)
3532 os << "\tBoundingBox 0bp 0bp "
3533 << opts["natwidth"] << " " << opts["natheight"] << '\n';
3534 else if (numberOfbbOptions > 0)
3535 cerr << "Warning: Ignoring incomplete includegraphics boundingbox arguments.\n";
3536 ostringstream special;
3537 if (opts.find("hiresbb") != opts.end())
3538 special << "hiresbb,";
3539 if (opts.find("trim") != opts.end())
3541 if (opts.find("viewport") != opts.end())
3542 special << "viewport=" << opts["viewport"] << ',';
3543 if (opts.find("totalheight") != opts.end())
3544 special << "totalheight=" << opts["totalheight"] << ',';
3545 if (opts.find("type") != opts.end())
3546 special << "type=" << opts["type"] << ',';
3547 if (opts.find("ext") != opts.end())
3548 special << "ext=" << opts["ext"] << ',';
3549 if (opts.find("read") != opts.end())
3550 special << "read=" << opts["read"] << ',';
3551 if (opts.find("command") != opts.end())
3552 special << "command=" << opts["command"] << ',';
3553 string s_special = special.str();
3554 if (!s_special.empty()) {
3555 // We had special arguments. Remove the trailing ','.
3556 os << "\tspecial " << s_special.substr(0, s_special.size() - 1) << '\n';
3558 // TODO: Handle the unknown settings better.
3559 // Warn about invalid options.
3560 // Check whether some option was given twice.
3562 preamble.registerAutomaticallyLoadedPackage("graphicx");
3566 if (t.cs() == "footnote" ||
3567 (t.cs() == "thanks" && context.layout->intitle)) {
3569 context.check_layout(os);
3570 begin_inset(os, "Foot\n");
3571 os << "status collapsed\n\n";
3572 parse_text_in_inset(p, os, FLAG_ITEM, false, context);
3577 if (t.cs() == "marginpar") {
3579 context.check_layout(os);
3580 begin_inset(os, "Marginal\n");
3581 os << "status collapsed\n\n";
3582 parse_text_in_inset(p, os, FLAG_ITEM, false, context);
3587 if (t.cs() == "lstinline" || t.cs() == "mintinline") {
3588 bool const use_minted = t.cs() == "mintinline";
3590 parse_listings(p, os, context, true, use_minted);
3594 if (t.cs() == "ensuremath") {
3596 context.check_layout(os);
3597 string const s = p.verbatim_item();
3598 //FIXME: this never triggers in UTF8
3599 if (s == "\xb1" || s == "\xb3" || s == "\xb2" || s == "\xb5")
3602 output_ert_inset(os, "\\ensuremath{" + s + "}",
3607 else if (t.cs() == "makeindex" || t.cs() == "maketitle") {
3608 if (preamble.titleLayoutFound()) {
3610 skip_spaces_braces(p);
3612 output_ert_inset(os, t.asInput(), context);
3616 if (t.cs() == "tableofcontents"
3617 || t.cs() == "lstlistoflistings"
3618 || t.cs() == "listoflistings") {
3619 string name = t.cs();
3620 if (preamble.minted() && name == "listoflistings")
3621 name.insert(0, "lst");
3622 context.check_layout(os);
3623 begin_command_inset(os, "toc", name);
3625 skip_spaces_braces(p);
3626 if (name == "lstlistoflistings") {
3627 if (preamble.minted())
3628 preamble.registerAutomaticallyLoadedPackage("minted");
3630 preamble.registerAutomaticallyLoadedPackage("listings");
3635 if (t.cs() == "listoffigures" || t.cs() == "listoftables") {
3636 context.check_layout(os);
3637 if (t.cs() == "listoffigures")
3638 begin_inset(os, "FloatList figure\n");
3640 begin_inset(os, "FloatList table\n");
3642 skip_spaces_braces(p);
3646 if (t.cs() == "listof") {
3647 p.skip_spaces(true);
3648 string const name = p.get_token().cs();
3649 if (context.textclass.floats().typeExist(name)) {
3650 context.check_layout(os);
3651 begin_inset(os, "FloatList ");
3654 p.get_token(); // swallow second arg
3656 output_ert_inset(os, "\\listof{" + name + "}", context);
3660 if ((where = is_known(t.cs(), known_text_font_families))) {
3661 parse_text_attributes(p, os, FLAG_ITEM, outer,
3662 context, "\\family", context.font.family,
3663 known_coded_font_families[where - known_text_font_families]);
3667 // beamer has a \textbf<overlay>{} inset
3668 if (!p.hasOpt("<") && (where = is_known(t.cs(), known_text_font_series))) {
3669 parse_text_attributes(p, os, FLAG_ITEM, outer,
3670 context, "\\series", context.font.series,
3671 known_coded_font_series[where - known_text_font_series]);
3675 // beamer has a \textit<overlay>{} inset
3676 if (!p.hasOpt("<") && (where = is_known(t.cs(), known_text_font_shapes))) {
3677 parse_text_attributes(p, os, FLAG_ITEM, outer,
3678 context, "\\shape", context.font.shape,
3679 known_coded_font_shapes[where - known_text_font_shapes]);
3683 if (t.cs() == "textnormal" || t.cs() == "normalfont") {
3684 context.check_layout(os);
3685 TeXFont oldFont = context.font;
3686 context.font.init();
3687 context.font.size = oldFont.size;
3688 os << "\n\\family " << context.font.family << "\n";
3689 os << "\n\\series " << context.font.series << "\n";
3690 os << "\n\\shape " << context.font.shape << "\n";
3691 if (t.cs() == "textnormal") {
3692 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
3693 output_font_change(os, context.font, oldFont);
3694 context.font = oldFont;
3696 eat_whitespace(p, os, context, false);
3700 if (t.cs() == "textcolor") {
3701 // scheme is \textcolor{color name}{text}
3702 string const color = p.verbatim_item();
3703 // we support the predefined colors of the color and the xcolor package
3704 if (color == "black" || color == "blue" || color == "cyan"
3705 || color == "green" || color == "magenta" || color == "red"
3706 || color == "white" || color == "yellow") {
3707 context.check_layout(os);
3708 os << "\n\\color " << color << "\n";
3709 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
3710 context.check_layout(os);
3711 os << "\n\\color inherit\n";
3712 preamble.registerAutomaticallyLoadedPackage("color");
3713 } else if (color == "brown" || color == "darkgray" || color == "gray"
3714 || color == "lightgray" || color == "lime" || color == "olive"
3715 || color == "orange" || color == "pink" || color == "purple"
3716 || color == "teal" || color == "violet") {
3717 context.check_layout(os);
3718 os << "\n\\color " << color << "\n";
3719 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
3720 context.check_layout(os);
3721 os << "\n\\color inherit\n";
3722 preamble.registerAutomaticallyLoadedPackage("xcolor");
3724 // for custom defined colors
3725 output_ert_inset(os, t.asInput() + "{" + color + "}", context);
3729 if (t.cs() == "underbar" || t.cs() == "uline") {
3730 // \underbar is not 100% correct (LyX outputs \uline
3731 // of ulem.sty). The difference is that \ulem allows
3732 // line breaks, and \underbar does not.
3733 // Do NOT handle \underline.
3734 // \underbar cuts through y, g, q, p etc.,
3735 // \underline does not.
3736 context.check_layout(os);
3737 os << "\n\\bar under\n";
3738 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
3739 context.check_layout(os);
3740 os << "\n\\bar default\n";
3741 preamble.registerAutomaticallyLoadedPackage("ulem");
3745 if (t.cs() == "sout") {
3746 context.check_layout(os);
3747 os << "\n\\strikeout on\n";
3748 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
3749 context.check_layout(os);
3750 os << "\n\\strikeout default\n";
3751 preamble.registerAutomaticallyLoadedPackage("ulem");
3755 // beamer has an \emph<overlay>{} inset
3756 if ((t.cs() == "uuline" || t.cs() == "uwave"
3757 || t.cs() == "emph" || t.cs() == "noun"
3758 || t.cs() == "xout") && !p.hasOpt("<")) {
3759 context.check_layout(os);
3760 os << "\n\\" << t.cs() << " on\n";
3761 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
3762 context.check_layout(os);
3763 os << "\n\\" << t.cs() << " default\n";
3764 if (t.cs() == "uuline" || t.cs() == "uwave" || t.cs() == "xout")
3765 preamble.registerAutomaticallyLoadedPackage("ulem");
3769 if (t.cs() == "lyxadded" || t.cs() == "lyxdeleted") {
3770 context.check_layout(os);
3771 string name = p.getArg('{', '}');
3772 string localtime = p.getArg('{', '}');
3773 preamble.registerAuthor(name);
3774 Author const & author = preamble.getAuthor(name);
3775 // from_asctime_utc() will fail if LyX decides to output the
3776 // time in the text language.
3777 time_t ptime = from_asctime_utc(localtime);
3778 if (ptime == static_cast<time_t>(-1)) {
3779 cerr << "Warning: Could not parse time `" << localtime
3780 << "´ for change tracking, using current time instead.\n";
3781 ptime = current_time();
3783 if (t.cs() == "lyxadded")
3784 os << "\n\\change_inserted ";
3786 os << "\n\\change_deleted ";
3787 os << author.bufferId() << ' ' << ptime << '\n';
3788 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
3789 bool dvipost = LaTeXPackages::isAvailable("dvipost");
3790 bool xcolorulem = LaTeXPackages::isAvailable("ulem") &&
3791 LaTeXPackages::isAvailable("xcolor");
3792 // No need to test for luatex, since luatex comes in
3793 // two flavours (dvi and pdf), like latex, and those
3794 // are detected by pdflatex.
3795 if (pdflatex || xetex) {
3797 preamble.registerAutomaticallyLoadedPackage("ulem");
3798 preamble.registerAutomaticallyLoadedPackage("xcolor");
3799 preamble.registerAutomaticallyLoadedPackage("pdfcolmk");
3803 preamble.registerAutomaticallyLoadedPackage("dvipost");
3804 } else if (xcolorulem) {
3805 preamble.registerAutomaticallyLoadedPackage("ulem");
3806 preamble.registerAutomaticallyLoadedPackage("xcolor");
3812 if (t.cs() == "textipa") {
3813 context.check_layout(os);
3814 begin_inset(os, "IPA\n");
3815 bool merging_hyphens_allowed = context.merging_hyphens_allowed;
3816 context.merging_hyphens_allowed = false;
3817 parse_text_in_inset(p, os, FLAG_ITEM, outer, context);
3818 context.merging_hyphens_allowed = merging_hyphens_allowed;
3820 preamble.registerAutomaticallyLoadedPackage("tipa");
3821 preamble.registerAutomaticallyLoadedPackage("tipx");
3825 if ((preamble.isPackageUsed("tipa") && t.cs() == "t" && p.next_token().asInput() == "*")
3826 || t.cs() == "texttoptiebar" || t.cs() == "textbottomtiebar") {
3827 context.check_layout(os);
3831 string const type = (t.cs() == "t") ? "bottomtiebar" : t.cs().substr(4);
3832 begin_inset(os, "IPADeco " + type + "\n");
3833 os << "status open\n";
3834 parse_text_in_inset(p, os, FLAG_ITEM, outer, context);
3840 if (t.cs() == "textvertline") {
3841 // FIXME: This is not correct, \textvertline is higher than |
3847 if (t.cs() == "tone" ) {
3848 context.check_layout(os);
3849 // register the tone package
3850 preamble.registerAutomaticallyLoadedPackage("tone");
3851 string content = trimSpaceAndEol(p.verbatim_item());
3852 string command = t.asInput() + "{" + content + "}";
3853 // some tones can be detected by unicodesymbols, some need special code
3854 if (is_known(content, known_tones)) {
3855 os << "\\IPAChar " << command << "\n";
3858 // try to see whether the string is in unicodesymbols
3862 docstring s = encodings.fromLaTeXCommand(from_utf8(command),
3863 Encodings::TEXT_CMD | Encodings::MATH_CMD,
3864 termination, rem, &req);
3868 output_ert_inset(os, to_utf8(rem), context);
3869 for (set<string>::const_iterator it = req.begin();
3870 it != req.end(); ++it)
3871 preamble.registerAutomaticallyLoadedPackage(*it);
3873 // we did not find a non-ert version
3874 output_ert_inset(os, command, context);
3878 if (t.cs() == "phantom" || t.cs() == "hphantom" ||
3879 t.cs() == "vphantom") {
3880 context.check_layout(os);
3881 if (t.cs() == "phantom")
3882 begin_inset(os, "Phantom Phantom\n");
3883 if (t.cs() == "hphantom")
3884 begin_inset(os, "Phantom HPhantom\n");
3885 if (t.cs() == "vphantom")
3886 begin_inset(os, "Phantom VPhantom\n");
3887 os << "status open\n";
3888 parse_text_in_inset(p, os, FLAG_ITEM, outer, context,
3894 if (t.cs() == "href") {
3895 context.check_layout(os);
3896 string target = convert_literate_command_inset_arg(p.verbatim_item());
3897 string name = p.verbatim_item();
3898 pair<bool, string> nm = convert_latexed_command_inset_arg(name);
3899 bool const literal = !nm.first;
3900 name = literal ? subst(name, "\n", " ") : nm.second;
3901 string lit = literal ? "\"true\"" : "\"false\"";
3903 size_t i = target.find(':');
3904 if (i != string::npos) {
3905 type = target.substr(0, i + 1);
3906 if (type == "mailto:" || type == "file:")
3907 target = target.substr(i + 1);
3908 // handle the case that name is equal to target, except of "http(s)://"
3909 else if (target.substr(i + 3) == name && (type == "http:" || type == "https:"))
3912 begin_command_inset(os, "href", "href");
3914 os << "name \"" << name << "\"\n";
3915 os << "target \"" << target << "\"\n";
3916 if (type == "mailto:" || type == "file:")
3917 os << "type \"" << type << "\"\n";
3918 os << "literal " << lit << "\n";
3920 skip_spaces_braces(p);
3924 if (t.cs() == "lyxline") {
3925 // swallow size argument (it is not used anyway)
3927 if (!context.atParagraphStart()) {
3928 // so our line is in the middle of a paragraph
3929 // we need to add a new line, lest this line
3930 // follow the other content on that line and
3931 // run off the side of the page
3932 // FIXME: This may create an empty paragraph,
3933 // but without that it would not be
3934 // possible to set noindent below.
3935 // Fortunately LaTeX does not care
3936 // about the empty paragraph.
3937 context.new_paragraph(os);
3939 if (preamble.indentParagraphs()) {
3940 // we need to unindent, lest the line be too long
3941 context.add_par_extra_stuff("\\noindent\n");
3943 context.check_layout(os);
3944 begin_command_inset(os, "line", "rule");
3945 os << "offset \"0.5ex\"\n"
3946 "width \"100line%\"\n"
3952 if (t.cs() == "rule") {
3953 string const offset = (p.hasOpt() ? p.getArg('[', ']') : string());
3954 string const width = p.getArg('{', '}');
3955 string const thickness = p.getArg('{', '}');
3956 context.check_layout(os);
3957 begin_command_inset(os, "line", "rule");
3958 if (!offset.empty())
3959 os << "offset \"" << translate_len(offset) << "\"\n";
3960 os << "width \"" << translate_len(width) << "\"\n"
3961 "height \"" << translate_len(thickness) << "\"\n";
3966 // handle refstyle first to catch \eqref which can also occur
3967 // without refstyle. Only recognize these commands if
3968 // refstyle.sty was found in the preamble (otherwise \eqref
3969 // and user defined ref commands could be misdetected).
3970 if ((where = is_known(t.cs(), known_refstyle_commands))
3971 && preamble.refstyle()) {
3972 context.check_layout(os);
3973 begin_command_inset(os, "ref", "formatted");
3974 os << "reference \"";
3975 os << known_refstyle_prefixes[where - known_refstyle_commands]
3977 os << convert_literate_command_inset_arg(p.verbatim_item())
3979 os << "plural \"false\"\n";
3980 os << "caps \"false\"\n";
3981 os << "noprefix \"false\"\n";
3983 preamble.registerAutomaticallyLoadedPackage("refstyle");
3987 // if refstyle is used, we must not convert \prettyref to a
3988 // formatted reference, since that would result in a refstyle command.
3989 if ((where = is_known(t.cs(), known_ref_commands)) &&
3990 (t.cs() != "prettyref" || !preamble.refstyle())) {
3991 string const opt = p.getOpt();
3993 context.check_layout(os);
3994 begin_command_inset(os, "ref",
3995 known_coded_ref_commands[where - known_ref_commands]);
3996 os << "reference \""
3997 << convert_literate_command_inset_arg(p.verbatim_item())
3999 os << "plural \"false\"\n";
4000 os << "caps \"false\"\n";
4001 os << "noprefix \"false\"\n";
4003 if (t.cs() == "vref" || t.cs() == "vpageref")
4004 preamble.registerAutomaticallyLoadedPackage("varioref");
4005 else if (t.cs() == "prettyref")
4006 preamble.registerAutomaticallyLoadedPackage("prettyref");
4008 // LyX does not yet support optional arguments of ref commands
4009 output_ert_inset(os, t.asInput() + '[' + opt + "]{" +
4010 p.verbatim_item() + '}', context);
4016 is_known(t.cs(), known_natbib_commands) &&
4017 ((t.cs() != "citefullauthor" &&
4018 t.cs() != "citeyear" &&
4019 t.cs() != "citeyearpar") ||
4020 p.next_token().asInput() != "*")) {
4021 context.check_layout(os);
4022 string command = t.cs();
4023 if (p.next_token().asInput() == "*") {
4027 if (command == "citefullauthor")
4028 // alternative name for "\\citeauthor*"
4029 command = "citeauthor*";
4031 // text before the citation
4033 // text after the citation
4035 get_cite_arguments(p, true, before, after);
4037 if (command == "cite") {
4038 // \cite without optional argument means
4039 // \citet, \cite with at least one optional
4040 // argument means \citep.
4041 if (before.empty() && after.empty())
4046 if (before.empty() && after == "[]")
4047 // avoid \citet[]{a}
4049 else if (before == "[]" && after == "[]") {
4050 // avoid \citet[][]{a}
4054 bool literal = false;
4055 pair<bool, string> aft;
4056 pair<bool, string> bef;
4057 // remove the brackets around after and before
4058 if (!after.empty()) {
4060 after.erase(after.length() - 1, 1);
4061 aft = convert_latexed_command_inset_arg(after);
4062 literal = !aft.first;
4063 after = literal ? subst(after, "\n", " ") : aft.second;
4065 if (!before.empty()) {
4067 before.erase(before.length() - 1, 1);
4068 bef = convert_latexed_command_inset_arg(before);
4069 literal |= !bef.first;
4070 before = literal ? subst(before, "\n", " ") : bef.second;
4071 if (literal && !after.empty())
4072 after = subst(after, "\n", " ");
4074 string lit = literal ? "\"true\"" : "\"false\"";
4075 begin_command_inset(os, "citation", command);
4076 os << "after " << '"' << after << '"' << "\n";
4077 os << "before " << '"' << before << '"' << "\n";
4079 << convert_literate_command_inset_arg(p.verbatim_item())
4081 << "literal " << lit << "\n";
4083 // Need to set the cite engine if natbib is loaded by
4084 // the document class directly
4085 if (preamble.citeEngine() == "basic")
4086 preamble.citeEngine("natbib");
4091 && is_known(t.cs(), known_biblatex_commands)
4092 && ((t.cs() == "cite"
4093 || t.cs() == "citeauthor"
4094 || t.cs() == "Citeauthor"
4095 || t.cs() == "parencite"
4096 || t.cs() == "citetitle")
4097 || p.next_token().asInput() != "*"))
4098 || (use_biblatex_natbib
4099 && (is_known(t.cs(), known_biblatex_commands)
4100 || is_known(t.cs(), known_natbib_commands))
4101 && ((t.cs() == "cite" || t.cs() == "citet" || t.cs() == "Citet"
4102 || t.cs() == "citep" || t.cs() == "Citep" || t.cs() == "citealt"
4103 || t.cs() == "Citealt" || t.cs() == "citealp" || t.cs() == "Citealp"
4104 || t.cs() == "citeauthor" || t.cs() == "Citeauthor"
4105 || t.cs() == "parencite" || t.cs() == "citetitle")
4106 || p.next_token().asInput() != "*"))){
4107 context.check_layout(os);
4108 string command = t.cs();
4109 if (p.next_token().asInput() == "*") {
4114 bool const qualified = suffixIs(command, "s");
4116 command = rtrim(command, "s");
4118 // text before the citation
4120 // text after the citation
4122 get_cite_arguments(p, true, before, after, qualified);
4124 // These use natbib cmd names in LyX
4125 // for inter-citeengine compativility
4126 if (command == "citeyear")
4127 command = "citebyear";
4128 else if (command == "cite*")
4129 command = "citeyear";
4130 else if (command == "textcite")
4132 else if (command == "Textcite")
4134 else if (command == "parencite")
4136 else if (command == "Parencite")
4138 else if (command == "parencite*")
4139 command = "citeyearpar";
4140 else if (command == "smartcite")
4141 command = "footcite";
4142 else if (command == "Smartcite")
4143 command = "Footcite";
4145 string const emptyarg = qualified ? "()" : "[]";
4146 if (before.empty() && after == emptyarg)
4149 else if (before == emptyarg && after == emptyarg) {
4150 // avoid \cite[][]{a}
4154 bool literal = false;
4155 pair<bool, string> aft;
4156 pair<bool, string> bef;
4157 // remove the brackets around after and before
4158 if (!after.empty()) {
4160 after.erase(after.length() - 1, 1);
4161 aft = convert_latexed_command_inset_arg(after);
4162 literal = !aft.first;
4163 after = literal ? subst(after, "\n", " ") : aft.second;
4165 if (!before.empty()) {
4167 before.erase(before.length() - 1, 1);
4168 bef = convert_latexed_command_inset_arg(before);
4169 literal |= !bef.first;
4170 before = literal ? subst(before, "\n", " ") : bef.second;
4172 string keys, pretextlist, posttextlist;
4174 map<string, string> pres, posts, preslit, postslit;
4175 vector<string> lkeys;
4176 // text before the citation
4177 string lbefore, lbeforelit;
4178 // text after the citation
4179 string lafter, lafterlit;
4181 pair<bool, string> laft, lbef;
4183 get_cite_arguments(p, true, lbefore, lafter);
4184 // remove the brackets around after and before
4185 if (!lafter.empty()) {
4187 lafter.erase(lafter.length() - 1, 1);
4188 laft = convert_latexed_command_inset_arg(lafter);
4189 literal |= !laft.first;
4190 lafter = laft.second;
4191 lafterlit = subst(lbefore, "\n", " ");
4193 if (!lbefore.empty()) {
4194 lbefore.erase(0, 1);
4195 lbefore.erase(lbefore.length() - 1, 1);
4196 lbef = convert_latexed_command_inset_arg(lbefore);
4197 literal |= !lbef.first;
4198 lbefore = lbef.second;
4199 lbeforelit = subst(lbefore, "\n", " ");
4201 if (lbefore.empty() && lafter == "[]") {
4206 else if (lbefore == "[]" && lafter == "[]") {
4207 // avoid \cite[][]{a}
4213 lkey = p.getArg('{', '}');
4216 if (!lbefore.empty()) {
4217 pres.insert(make_pair(lkey, lbefore));
4218 preslit.insert(make_pair(lkey, lbeforelit));
4220 if (!lafter.empty()) {
4221 posts.insert(make_pair(lkey, lafter));
4222 postslit.insert(make_pair(lkey, lafterlit));
4224 lkeys.push_back(lkey);
4226 keys = convert_literate_command_inset_arg(getStringFromVector(lkeys));
4231 for (auto const & ptl : pres) {
4232 if (!pretextlist.empty())
4233 pretextlist += '\t';
4234 pretextlist += ptl.first + " " + ptl.second;
4236 for (auto const & potl : posts) {
4237 if (!posttextlist.empty())
4238 posttextlist += '\t';
4239 posttextlist += potl.first + " " + potl.second;
4242 keys = convert_literate_command_inset_arg(p.verbatim_item());
4245 after = subst(after, "\n", " ");
4246 if (!before.empty())
4247 before = subst(after, "\n", " ");
4249 string lit = literal ? "\"true\"" : "\"false\"";
4250 begin_command_inset(os, "citation", command);
4251 os << "after " << '"' << after << '"' << "\n";
4252 os << "before " << '"' << before << '"' << "\n";
4256 if (!pretextlist.empty())
4257 os << "pretextlist " << '"' << pretextlist << '"' << "\n";
4258 if (!posttextlist.empty())
4259 os << "posttextlist " << '"' << posttextlist << '"' << "\n";
4260 os << "literal " << lit << "\n";
4262 // Need to set the cite engine if biblatex is loaded by
4263 // the document class directly
4264 if (preamble.citeEngine() == "basic")
4265 use_biblatex_natbib ?
4266 preamble.citeEngine("biblatex-natbib")
4267 : preamble.citeEngine("biblatex");
4272 is_known(t.cs(), known_jurabib_commands) &&
4273 (t.cs() == "cite" || p.next_token().asInput() != "*")) {
4274 context.check_layout(os);
4275 string command = t.cs();
4276 if (p.next_token().asInput() == "*") {
4280 char argumentOrder = '\0';
4281 vector<string> const options =
4282 preamble.getPackageOptions("jurabib");
4283 if (find(options.begin(), options.end(),
4284 "natbiborder") != options.end())
4285 argumentOrder = 'n';
4286 else if (find(options.begin(), options.end(),
4287 "jurabiborder") != options.end())
4288 argumentOrder = 'j';
4290 // text before the citation
4292 // text after the citation
4294 get_cite_arguments(p, argumentOrder != 'j', before, after);
4296 string const citation = p.verbatim_item();
4297 if (!before.empty() && argumentOrder == '\0') {
4298 cerr << "Warning: Assuming argument order "
4299 "of jurabib version 0.6 for\n'"
4300 << command << before << after << '{'
4301 << citation << "}'.\n"
4302 "Add 'jurabiborder' to the jurabib "
4303 "package options if you used an\n"
4304 "earlier jurabib version." << endl;
4306 bool literal = false;
4307 pair<bool, string> aft;
4308 pair<bool, string> bef;
4309 // remove the brackets around after and before
4310 if (!after.empty()) {
4312 after.erase(after.length() - 1, 1);
4313 aft = convert_latexed_command_inset_arg(after);
4314 literal = !aft.first;
4315 after = literal ? subst(after, "\n", " ") : aft.second;
4317 if (!before.empty()) {
4319 before.erase(before.length() - 1, 1);
4320 bef = convert_latexed_command_inset_arg(before);
4321 literal |= !bef.first;
4322 before = literal ? subst(before, "\n", " ") : bef.second;
4323 if (literal && !after.empty())
4324 after = subst(after, "\n", " ");
4326 string lit = literal ? "\"true\"" : "\"false\"";
4327 begin_command_inset(os, "citation", command);
4328 os << "after " << '"' << after << "\"\n"
4329 << "before " << '"' << before << "\"\n"
4330 << "key " << '"' << citation << "\"\n"
4331 << "literal " << lit << "\n";
4333 // Need to set the cite engine if jurabib is loaded by
4334 // the document class directly
4335 if (preamble.citeEngine() == "basic")
4336 preamble.citeEngine("jurabib");
4340 if (t.cs() == "cite"
4341 || t.cs() == "nocite") {
4342 context.check_layout(os);
4343 string after = p.getArg('[', ']');
4344 pair<bool, string> aft = convert_latexed_command_inset_arg(after);
4345 bool const literal = !aft.first;
4346 after = literal ? subst(after, "\n", " ") : aft.second;
4347 string lit = literal ? "\"true\"" : "\"false\"";
4348 string key = convert_literate_command_inset_arg(p.verbatim_item());
4349 // store the case that it is "\nocite{*}" to use it later for
4352 begin_command_inset(os, "citation", t.cs());
4353 os << "after " << '"' << after << "\"\n"
4354 << "key " << '"' << key << "\"\n"
4355 << "literal " << lit << "\n";
4357 } else if (t.cs() == "nocite")
4362 if (t.cs() == "index" ||
4363 (t.cs() == "sindex" && preamble.use_indices() == "true")) {
4364 context.check_layout(os);
4365 string const arg = (t.cs() == "sindex" && p.hasOpt()) ?
4366 p.getArg('[', ']') : "";
4367 string const kind = arg.empty() ? "idx" : arg;
4368 begin_inset(os, "Index ");
4369 os << kind << "\nstatus collapsed\n";
4370 parse_text_in_inset(p, os, FLAG_ITEM, false, context, "Index");
4373 preamble.registerAutomaticallyLoadedPackage("splitidx");
4377 if (t.cs() == "nomenclature") {
4378 context.check_layout(os);
4379 begin_command_inset(os, "nomenclature", "nomenclature");
4380 string prefix = convert_literate_command_inset_arg(p.getArg('[', ']'));
4381 if (!prefix.empty())
4382 os << "prefix " << '"' << prefix << '"' << "\n";
4383 string symbol = p.verbatim_item();
4384 pair<bool, string> sym = convert_latexed_command_inset_arg(symbol);
4385 bool literal = !sym.first;
4386 string description = p.verbatim_item();
4387 pair<bool, string> desc = convert_latexed_command_inset_arg(description);
4388 literal |= !desc.first;
4390 symbol = subst(symbol, "\n", " ");
4391 description = subst(description, "\n", " ");
4393 symbol = sym.second;
4394 description = desc.second;
4396 string lit = literal ? "\"true\"" : "\"false\"";
4397 os << "symbol " << '"' << symbol;
4398 os << "\"\ndescription \""
4399 << description << "\"\n"
4400 << "literal " << lit << "\n";
4402 preamble.registerAutomaticallyLoadedPackage("nomencl");
4406 if (t.cs() == "label") {
4407 context.check_layout(os);
4408 begin_command_inset(os, "label", "label");
4410 << convert_literate_command_inset_arg(p.verbatim_item())
4416 if (t.cs() == "lyxmintcaption") {
4417 string const pos = p.getArg('[', ']');
4419 string const caption =
4420 parse_text_snippet(p, FLAG_ITEM, false,
4422 minted_nonfloat_caption = "[t]" + caption;
4424 // We already got the caption at the bottom,
4425 // so simply skip it.
4426 parse_text_snippet(p, FLAG_ITEM, false, context);
4431 if (t.cs() == "printindex" || t.cs() == "printsubindex") {
4432 context.check_layout(os);
4433 string commandname = t.cs();
4435 if (p.next_token().asInput() == "*") {
4440 begin_command_inset(os, "index_print", commandname);
4441 string const indexname = p.getArg('[', ']');
4443 if (indexname.empty())
4444 os << "type \"idx\"\n";
4446 os << "type \"" << indexname << "\"\n";
4447 os << "literal \"true\"\n";
4450 skip_spaces_braces(p);
4451 preamble.registerAutomaticallyLoadedPackage("makeidx");
4452 if (preamble.use_indices() == "true")
4453 preamble.registerAutomaticallyLoadedPackage("splitidx");
4457 if (t.cs() == "printnomenclature") {
4459 string width_type = "";
4460 context.check_layout(os);
4461 begin_command_inset(os, "nomencl_print", "printnomenclature");
4462 // case of a custom width
4464 width = p.getArg('[', ']');
4465 width = translate_len(width);
4466 width_type = "custom";
4468 // case of no custom width
4469 // the case of no custom width but the width set
4470 // via \settowidth{\nomlabelwidth}{***} cannot be supported
4471 // because the user could have set anything, not only the width
4472 // of the longest label (which would be width_type = "auto")
4473 string label = convert_literate_command_inset_arg(p.getArg('{', '}'));
4474 if (label.empty() && width_type.empty())
4475 width_type = "none";
4476 os << "set_width \"" << width_type << "\"\n";
4477 if (width_type == "custom")
4478 os << "width \"" << width << '\"';
4480 skip_spaces_braces(p);
4481 preamble.registerAutomaticallyLoadedPackage("nomencl");
4485 if ((t.cs() == "textsuperscript" || t.cs() == "textsubscript")) {
4486 context.check_layout(os);
4487 begin_inset(os, "script ");
4488 os << t.cs().substr(4) << '\n';
4489 newinsetlayout = findInsetLayout(context.textclass, t.cs(), true);
4490 parse_text_in_inset(p, os, FLAG_ITEM, false, context, newinsetlayout);
4492 if (t.cs() == "textsubscript")
4493 preamble.registerAutomaticallyLoadedPackage("subscript");
4497 if ((where = is_known(t.cs(), known_quotes))) {
4498 context.check_layout(os);
4499 begin_inset(os, "Quotes ");
4500 string quotetype = known_coded_quotes[where - known_quotes];
4501 // try to make a smart guess about the side
4502 Token const prev = p.prev_token();
4503 bool const opening = (prev.cat() != catSpace && prev.character() != 0
4504 && prev.character() != '\n' && prev.character() != '~');
4505 quotetype = guessQuoteStyle(quotetype, opening);
4508 // LyX adds {} after the quote, so we have to eat
4509 // spaces here if there are any before a possible
4511 eat_whitespace(p, os, context, false);
4516 if ((where = is_known(t.cs(), known_sizes)) &&
4517 context.new_layout_allowed) {
4518 context.check_layout(os);
4519 TeXFont const oldFont = context.font;
4520 context.font.size = known_coded_sizes[where - known_sizes];
4521 output_font_change(os, oldFont, context.font);
4522 eat_whitespace(p, os, context, false);
4526 if ((where = is_known(t.cs(), known_font_families)) &&
4527 context.new_layout_allowed) {
4528 context.check_layout(os);
4529 TeXFont const oldFont = context.font;
4530 context.font.family =
4531 known_coded_font_families[where - known_font_families];
4532 output_font_change(os, oldFont, context.font);
4533 eat_whitespace(p, os, context, false);
4537 if ((where = is_known(t.cs(), known_font_series)) &&
4538 context.new_layout_allowed) {
4539 context.check_layout(os);
4540 TeXFont const oldFont = context.font;
4541 context.font.series =
4542 known_coded_font_series[where - known_font_series];
4543 output_font_change(os, oldFont, context.font);
4544 eat_whitespace(p, os, context, false);
4548 if ((where = is_known(t.cs(), known_font_shapes)) &&
4549 context.new_layout_allowed) {
4550 context.check_layout(os);
4551 TeXFont const oldFont = context.font;
4552 context.font.shape =
4553 known_coded_font_shapes[where - known_font_shapes];
4554 output_font_change(os, oldFont, context.font);
4555 eat_whitespace(p, os, context, false);
4558 if ((where = is_known(t.cs(), known_old_font_families)) &&
4559 context.new_layout_allowed) {
4560 context.check_layout(os);
4561 TeXFont const oldFont = context.font;
4562 context.font.init();
4563 context.font.size = oldFont.size;
4564 context.font.family =
4565 known_coded_font_families[where - known_old_font_families];
4566 output_font_change(os, oldFont, context.font);
4567 eat_whitespace(p, os, context, false);
4571 if ((where = is_known(t.cs(), known_old_font_series)) &&
4572 context.new_layout_allowed) {
4573 context.check_layout(os);
4574 TeXFont const oldFont = context.font;
4575 context.font.init();
4576 context.font.size = oldFont.size;
4577 context.font.series =
4578 known_coded_font_series[where - known_old_font_series];
4579 output_font_change(os, oldFont, context.font);
4580 eat_whitespace(p, os, context, false);
4584 if ((where = is_known(t.cs(), known_old_font_shapes)) &&
4585 context.new_layout_allowed) {
4586 context.check_layout(os);
4587 TeXFont const oldFont = context.font;
4588 context.font.init();
4589 context.font.size = oldFont.size;
4590 context.font.shape =
4591 known_coded_font_shapes[where - known_old_font_shapes];
4592 output_font_change(os, oldFont, context.font);
4593 eat_whitespace(p, os, context, false);
4597 if (t.cs() == "selectlanguage") {
4598 context.check_layout(os);
4599 // save the language for the case that a
4600 // \foreignlanguage is used
4601 context.font.language = babel2lyx(p.verbatim_item());
4602 os << "\n\\lang " << context.font.language << "\n";
4606 if (t.cs() == "foreignlanguage") {
4607 string const lang = babel2lyx(p.verbatim_item());
4608 parse_text_attributes(p, os, FLAG_ITEM, outer,
4610 context.font.language, lang);
4614 if (prefixIs(t.cs(), "text") && preamble.usePolyglossia()
4615 && is_known(t.cs().substr(4), preamble.polyglossia_languages)) {
4616 // scheme is \textLANGUAGE{text} where LANGUAGE is in polyglossia_languages[]
4618 // We have to output the whole command if it has an option
4619 // because LyX doesn't support this yet, see bug #8214,
4620 // only if there is a single option specifying a variant, we can handle it.
4622 string langopts = p.getOpt();
4623 // check if the option contains a variant, if yes, extract it
4624 string::size_type pos_var = langopts.find("variant");
4625 string::size_type i = langopts.find(',');
4626 string::size_type k = langopts.find('=', pos_var);
4627 if (pos_var != string::npos && i == string::npos) {
4629 variant = langopts.substr(k + 1, langopts.length() - k - 2);
4630 lang = preamble.polyglossia2lyx(variant);
4631 parse_text_attributes(p, os, FLAG_ITEM, outer,
4633 context.font.language, lang);
4635 output_ert_inset(os, t.asInput() + langopts, context);
4637 lang = preamble.polyglossia2lyx(t.cs().substr(4, string::npos));
4638 parse_text_attributes(p, os, FLAG_ITEM, outer,
4640 context.font.language, lang);
4645 if (t.cs() == "inputencoding") {
4646 // nothing to write here
4647 string const enc = subst(p.verbatim_item(), "\n", " ");
4648 p.setEncoding(enc, Encoding::inputenc);
4652 if (is_known(t.cs(), known_special_chars) ||
4653 (t.cs() == "protect" &&
4654 p.next_token().cat() == catEscape &&
4655 is_known(p.next_token().cs(), known_special_protect_chars))) {
4656 // LyX sometimes puts a \protect in front, so we have to ignore it
4658 t.cs() == "protect" ? p.get_token().cs() : t.cs(),
4659 known_special_chars);
4660 context.check_layout(os);
4661 os << known_coded_special_chars[where - known_special_chars];
4662 skip_spaces_braces(p);
4666 if ((t.cs() == "nobreakdash" && p.next_token().asInput() == "-") ||
4667 (t.cs() == "protect" && p.next_token().asInput() == "\\nobreakdash" &&
4668 p.next_next_token().asInput() == "-") ||
4669 (t.cs() == "@" && p.next_token().asInput() == ".")) {
4670 // LyX sometimes puts a \protect in front, so we have to ignore it
4671 if (t.cs() == "protect")
4673 context.check_layout(os);
4674 if (t.cs() == "nobreakdash")
4675 os << "\\SpecialChar nobreakdash\n";
4677 os << "\\SpecialChar endofsentence\n";
4682 if (t.cs() == "_" || t.cs() == "&" || t.cs() == "#"
4683 || t.cs() == "$" || t.cs() == "{" || t.cs() == "}"
4684 || t.cs() == "%" || t.cs() == "-") {
4685 context.check_layout(os);
4687 os << "\\SpecialChar softhyphen\n";
4693 if (t.cs() == "char") {
4694 context.check_layout(os);
4695 if (p.next_token().character() == '`') {
4697 if (p.next_token().cs() == "\"") {
4702 output_ert_inset(os, "\\char`", context);
4705 output_ert_inset(os, "\\char", context);
4710 if (t.cs() == "verb") {
4711 context.check_layout(os);
4712 // set catcodes to verbatim early, just in case.
4713 p.setCatcodes(VERBATIM_CATCODES);
4714 string delim = p.get_token().asInput();
4715 Parser::Arg arg = p.verbatimStuff(delim);
4717 output_ert_inset(os, "\\verb" + delim
4718 + arg.second + delim, context);
4720 cerr << "invalid \\verb command. Skipping" << endl;
4724 // Problem: \= creates a tabstop inside the tabbing environment
4725 // and else an accent. In the latter case we really would want
4726 // \={o} instead of \= o.
4727 if (t.cs() == "=" && (flags & FLAG_TABBING)) {
4728 output_ert_inset(os, t.asInput(), context);
4732 if (t.cs() == "\\") {
4733 context.check_layout(os);
4735 output_ert_inset(os, "\\\\" + p.getOpt(), context);
4736 else if (p.next_token().asInput() == "*") {
4738 // getOpt() eats the following space if there
4739 // is no optional argument, but that is OK
4740 // here since it has no effect in the output.
4741 output_ert_inset(os, "\\\\*" + p.getOpt(), context);
4744 begin_inset(os, "Newline newline");
4750 if (t.cs() == "newline" ||
4751 (t.cs() == "linebreak" && !p.hasOpt())) {
4752 context.check_layout(os);
4753 begin_inset(os, "Newline ");
4756 skip_spaces_braces(p);
4760 if (t.cs() == "input" || t.cs() == "include"
4761 || t.cs() == "verbatiminput") {
4762 string name = t.cs();
4763 if (t.cs() == "verbatiminput"
4764 && p.next_token().asInput() == "*")
4765 name += p.get_token().asInput();
4766 context.check_layout(os);
4767 string filename(normalize_filename(p.getArg('{', '}')));
4768 string const path = getMasterFilePath(true);
4769 // We want to preserve relative / absolute filenames,
4770 // therefore path is only used for testing
4771 if ((t.cs() == "include" || t.cs() == "input") &&
4772 !makeAbsPath(filename, path).exists()) {
4773 // The file extension is probably missing.
4774 // Now try to find it out.
4775 string const tex_name =
4776 find_file(filename, path,
4777 known_tex_extensions);
4778 if (!tex_name.empty())
4779 filename = tex_name;
4781 bool external = false;
4783 if (makeAbsPath(filename, path).exists()) {
4784 string const abstexname =
4785 makeAbsPath(filename, path).absFileName();
4786 string const absfigname =
4787 changeExtension(abstexname, ".fig");
4788 fix_child_filename(filename);
4789 string const lyxname = changeExtension(filename,
4790 roundtripMode() ? ".lyx.lyx" : ".lyx");
4791 string const abslyxname = makeAbsPath(
4792 lyxname, getParentFilePath(false)).absFileName();
4794 if (!skipChildren())
4795 external = FileName(absfigname).exists();
4796 if (t.cs() == "input" && !skipChildren()) {
4797 string const ext = getExtension(abstexname);
4799 // Combined PS/LaTeX:
4800 // x.eps, x.pstex_t (old xfig)
4801 // x.pstex, x.pstex_t (new xfig, e.g. 3.2.5)
4802 FileName const absepsname(
4803 changeExtension(abstexname, ".eps"));
4804 FileName const abspstexname(
4805 changeExtension(abstexname, ".pstex"));
4806 bool const xfigeps =
4807 (absepsname.exists() ||
4808 abspstexname.exists()) &&
4811 // Combined PDF/LaTeX:
4812 // x.pdf, x.pdftex_t (old xfig)
4813 // x.pdf, x.pdf_t (new xfig, e.g. 3.2.5)
4814 FileName const abspdfname(
4815 changeExtension(abstexname, ".pdf"));
4816 bool const xfigpdf =
4817 abspdfname.exists() &&
4818 (ext == "pdftex_t" || ext == "pdf_t");
4822 // Combined PS/PDF/LaTeX:
4823 // x_pspdftex.eps, x_pspdftex.pdf, x.pspdftex
4824 string const absbase2(
4825 removeExtension(abstexname) + "_pspdftex");
4826 FileName const abseps2name(
4827 addExtension(absbase2, ".eps"));
4828 FileName const abspdf2name(
4829 addExtension(absbase2, ".pdf"));
4830 bool const xfigboth =
4831 abspdf2name.exists() &&
4832 abseps2name.exists() && ext == "pspdftex";
4834 xfig = xfigpdf || xfigeps || xfigboth;
4835 external = external && xfig;
4838 outname = changeExtension(filename, ".fig");
4839 FileName abssrc(changeExtension(abstexname, ".fig"));
4840 copy_file(abssrc, outname);
4842 // Don't try to convert, the result
4843 // would be full of ERT.
4845 FileName abssrc(abstexname);
4846 copy_file(abssrc, outname);
4847 } else if (t.cs() != "verbatiminput" &&
4849 tex2lyx(abstexname, FileName(abslyxname),
4852 // no need to call copy_file
4853 // tex2lyx creates the file
4856 FileName abssrc(abstexname);
4857 copy_file(abssrc, outname);
4860 cerr << "Warning: Could not find included file '"
4861 << filename << "'." << endl;
4865 begin_inset(os, "External\n");
4866 os << "\ttemplate XFig\n"
4867 << "\tfilename " << outname << '\n';
4868 registerExternalTemplatePackages("XFig");
4870 begin_command_inset(os, "include", name);
4871 outname = subst(outname, "\"", "\\\"");
4872 os << "preview false\n"
4873 "filename \"" << outname << "\"\n";
4874 if (t.cs() == "verbatiminput")
4875 preamble.registerAutomaticallyLoadedPackage("verbatim");
4881 if (t.cs() == "bibliographystyle") {
4882 // store new bibliographystyle
4883 bibliographystyle = p.verbatim_item();
4884 // If any other command than \bibliography, \addcontentsline
4885 // and \nocite{*} follows, we need to output the style
4886 // (because it might be used by that command).
4887 // Otherwise, it will automatically be output by LyX.
4890 for (Token t2 = p.get_token(); p.good(); t2 = p.get_token()) {
4891 if (t2.cat() == catBegin)
4893 if (t2.cat() != catEscape)
4895 if (t2.cs() == "nocite") {
4896 if (p.getArg('{', '}') == "*")
4898 } else if (t2.cs() == "bibliography")
4900 else if (t2.cs() == "phantomsection") {
4904 else if (t2.cs() == "addcontentsline") {
4905 // get the 3 arguments of \addcontentsline
4908 contentslineContent = p.getArg('{', '}');
4909 // if the last argument is not \refname we must output
4910 if (contentslineContent == "\\refname")
4917 output_ert_inset(os,
4918 "\\bibliographystyle{" + bibliographystyle + '}',
4924 if (t.cs() == "phantomsection") {
4925 // we only support this if it occurs between
4926 // \bibliographystyle and \bibliography
4927 if (bibliographystyle.empty())
4928 output_ert_inset(os, "\\phantomsection", context);
4932 if (t.cs() == "addcontentsline") {
4933 context.check_layout(os);
4934 // get the 3 arguments of \addcontentsline
4935 string const one = p.getArg('{', '}');
4936 string const two = p.getArg('{', '}');
4937 string const three = p.getArg('{', '}');
4938 // only if it is a \refname, we support if for the bibtex inset
4939 if (contentslineContent != "\\refname") {
4940 output_ert_inset(os,
4941 "\\addcontentsline{" + one + "}{" + two + "}{"+ three + '}',
4947 else if (t.cs() == "bibliography") {
4948 context.check_layout(os);
4950 begin_command_inset(os, "bibtex", "bibtex");
4951 if (!btprint.empty()) {
4952 os << "btprint " << '"' << "btPrintAll" << '"' << "\n";
4953 // clear the string because the next BibTeX inset can be without the
4954 // \nocite{*} option
4957 os << "bibfiles " << '"' << normalize_filename(p.verbatim_item()) << '"' << "\n";
4958 // Do we have addcontentsline?
4959 if (contentslineContent == "\\refname") {
4960 BibOpts = "bibtotoc";
4961 // clear string because next BibTeX inset can be without addcontentsline
4962 contentslineContent.clear();
4964 // Do we have a bibliographystyle set?
4965 if (!bibliographystyle.empty()) {
4966 if (BibOpts.empty())
4967 BibOpts = normalize_filename(bibliographystyle);
4969 BibOpts = BibOpts + ',' + normalize_filename(bibliographystyle);
4970 // clear it because each bibtex entry has its style
4971 // and we need an empty string to handle \phantomsection
4972 bibliographystyle.clear();
4974 os << "options " << '"' << BibOpts << '"' << "\n";
4979 if (t.cs() == "printbibliography") {
4980 context.check_layout(os);
4982 string bbloptions = p.hasOpt() ? p.getArg('[', ']') : string();
4983 vector<string> opts = getVectorFromString(bbloptions);
4984 vector<string>::iterator it =
4985 find(opts.begin(), opts.end(), "heading=bibintoc");
4986 if (it != opts.end()) {
4988 BibOpts = "bibtotoc";
4990 bbloptions = getStringFromVector(opts);
4991 begin_command_inset(os, "bibtex", "bibtex");
4992 if (!btprint.empty()) {
4993 os << "btprint " << '"' << "btPrintAll" << '"' << "\n";
4994 // clear the string because the next BibTeX inset can be without the
4995 // \nocite{*} option
4999 for (auto const & bf : preamble.biblatex_bibliographies) {
5000 if (!bibfiles.empty())
5002 bibfiles += normalize_filename(bf);
5004 if (!bibfiles.empty())
5005 os << "bibfiles " << '"' << bibfiles << '"' << "\n";
5006 // Do we have addcontentsline?
5007 if (contentslineContent == "\\refname") {
5008 BibOpts = "bibtotoc";
5009 // clear string because next BibTeX inset can be without addcontentsline
5010 contentslineContent.clear();
5012 os << "options " << '"' << BibOpts << '"' << "\n";
5013 if (!bbloptions.empty())
5014 os << "biblatexopts " << '"' << bbloptions << '"' << "\n";
5016 need_commentbib = false;
5020 if (t.cs() == "bibbysection") {
5021 context.check_layout(os);
5023 string bbloptions = p.hasOpt() ? p.getArg('[', ']') : string();
5024 vector<string> opts = getVectorFromString(bbloptions);
5025 vector<string>::iterator it =
5026 find(opts.begin(), opts.end(), "heading=bibintoc");
5027 if (it != opts.end()) {
5029 BibOpts = "bibtotoc";
5031 bbloptions = getStringFromVector(opts);
5032 begin_command_inset(os, "bibtex", "bibtex");
5033 os << "btprint " << '"' << "bibbysection" << '"' << "\n";
5035 for (auto const & bf : preamble.biblatex_bibliographies) {
5036 if (!bibfiles.empty())
5038 bibfiles += normalize_filename(bf);
5040 if (!bibfiles.empty())
5041 os << "bibfiles " << '"' << bibfiles << '"' << "\n";
5042 os << "options " << '"' << BibOpts << '"' << "\n";
5043 if (!bbloptions.empty())
5044 os << "biblatexopts " << '"' << bbloptions << '"' << "\n";
5046 need_commentbib = false;
5050 if (t.cs() == "parbox") {
5051 // Test whether this is an outer box of a shaded box
5053 // swallow arguments
5054 while (p.hasOpt()) {
5056 p.skip_spaces(true);
5059 p.skip_spaces(true);
5061 if (p.next_token().cat() == catBegin)
5063 p.skip_spaces(true);
5064 Token to = p.get_token();
5065 bool shaded = false;
5066 if (to.asInput() == "\\begin") {
5067 p.skip_spaces(true);
5068 if (p.getArg('{', '}') == "shaded")
5073 parse_outer_box(p, os, FLAG_ITEM, outer,
5074 context, "parbox", "shaded");
5076 parse_box(p, os, 0, FLAG_ITEM, outer, context,
5077 "", "", t.cs(), "", "");
5081 if (t.cs() == "fbox" || t.cs() == "mbox" ||
5082 t.cs() == "ovalbox" || t.cs() == "Ovalbox" ||
5083 t.cs() == "shadowbox" || t.cs() == "doublebox") {
5084 parse_outer_box(p, os, FLAG_ITEM, outer, context, t.cs(), "");
5088 if (t.cs() == "fcolorbox" || t.cs() == "colorbox") {
5089 string backgroundcolor;
5090 preamble.registerAutomaticallyLoadedPackage("xcolor");
5091 if (t.cs() == "fcolorbox") {
5092 string const framecolor = p.getArg('{', '}');
5093 backgroundcolor = p.getArg('{', '}');
5094 parse_box(p, os, 0, 0, outer, context, "", "", "", framecolor, backgroundcolor);
5096 backgroundcolor = p.getArg('{', '}');
5097 parse_box(p, os, 0, 0, outer, context, "", "", "", "", backgroundcolor);
5102 // FIXME: due to the compiler limit of "if" nestings
5103 // the code for the alignment was put here
5104 // put them in their own if if this is fixed
5105 if (t.cs() == "fboxrule" || t.cs() == "fboxsep"
5106 || t.cs() == "shadowsize"
5107 || t.cs() == "raggedleft" || t.cs() == "centering"
5108 || t.cs() == "raggedright") {
5109 if (t.cs() == "fboxrule")
5111 if (t.cs() == "fboxsep")
5113 if (t.cs() == "shadowsize")
5115 if (t.cs() != "raggedleft" && t.cs() != "centering"
5116 && t.cs() != "raggedright") {
5117 p.skip_spaces(true);
5118 while (p.good() && p.next_token().cat() != catSpace
5119 && p.next_token().cat() != catNewline
5120 && p.next_token().cat() != catEscape) {
5121 if (t.cs() == "fboxrule")
5122 fboxrule = fboxrule + p.get_token().asInput();
5123 if (t.cs() == "fboxsep")
5124 fboxsep = fboxsep + p.get_token().asInput();
5125 if (t.cs() == "shadowsize")
5126 shadow_size = shadow_size + p.get_token().asInput();
5129 output_ert_inset(os, t.asInput(), context);
5134 //\framebox() is part of the picture environment and different from \framebox{}
5135 //\framebox{} will be parsed by parse_outer_box
5136 if (t.cs() == "framebox") {
5137 if (p.next_token().character() == '(') {
5138 //the syntax is: \framebox(x,y)[position]{content}
5139 string arg = t.asInput();
5140 arg += p.getFullParentheseArg();
5141 arg += p.getFullOpt();
5142 eat_whitespace(p, os, context, false);
5143 output_ert_inset(os, arg + '{', context);
5144 parse_text(p, os, FLAG_ITEM, outer, context);
5145 output_ert_inset(os, "}", context);
5147 //the syntax is: \framebox[width][position]{content}
5148 string special = p.getFullOpt();
5149 special += p.getOpt();
5150 parse_outer_box(p, os, FLAG_ITEM, outer,
5151 context, t.cs(), special);
5156 //\makebox() is part of the picture environment and different from \makebox{}
5157 //\makebox{} will be parsed by parse_box
5158 if (t.cs() == "makebox") {
5159 if (p.next_token().character() == '(') {
5160 //the syntax is: \makebox(x,y)[position]{content}
5161 string arg = t.asInput();
5162 arg += p.getFullParentheseArg();
5163 arg += p.getFullOpt();
5164 eat_whitespace(p, os, context, false);
5165 output_ert_inset(os, arg + '{', context);
5166 parse_text(p, os, FLAG_ITEM, outer, context);
5167 output_ert_inset(os, "}", context);
5169 //the syntax is: \makebox[width][position]{content}
5170 parse_box(p, os, 0, FLAG_ITEM, outer, context,
5171 "", "", t.cs(), "", "");
5175 if (t.cs() == "smallskip" ||
5176 t.cs() == "medskip" ||
5177 t.cs() == "bigskip" ||
5178 t.cs() == "vfill") {
5179 context.check_layout(os);
5180 begin_inset(os, "VSpace ");
5183 skip_spaces_braces(p);
5187 if ((where = is_known(t.cs(), known_spaces))) {
5188 context.check_layout(os);
5189 begin_inset(os, "space ");
5190 os << '\\' << known_coded_spaces[where - known_spaces]
5193 // LaTeX swallows whitespace after all spaces except
5194 // "\\,". We have to do that here, too, because LyX
5195 // adds "{}" which would make the spaces significant.
5197 eat_whitespace(p, os, context, false);
5198 // LyX adds "{}" after all spaces except "\\ " and
5199 // "\\,", so we have to remove "{}".
5200 // "\\,{}" is equivalent to "\\," in LaTeX, so we
5201 // remove the braces after "\\,", too.
5207 if (t.cs() == "newpage" ||
5208 (t.cs() == "pagebreak" && !p.hasOpt()) ||
5209 t.cs() == "clearpage" ||
5210 t.cs() == "cleardoublepage") {
5211 context.check_layout(os);
5212 begin_inset(os, "Newpage ");
5215 skip_spaces_braces(p);
5219 if (t.cs() == "DeclareRobustCommand" ||
5220 t.cs() == "DeclareRobustCommandx" ||
5221 t.cs() == "newcommand" ||
5222 t.cs() == "newcommandx" ||
5223 t.cs() == "providecommand" ||
5224 t.cs() == "providecommandx" ||
5225 t.cs() == "renewcommand" ||
5226 t.cs() == "renewcommandx") {
5227 // DeclareRobustCommand, DeclareRobustCommandx,
5228 // providecommand and providecommandx could be handled
5229 // by parse_command(), but we need to call
5230 // add_known_command() here.
5231 string name = t.asInput();
5232 if (p.next_token().asInput() == "*") {
5233 // Starred form. Eat '*'
5237 string const command = p.verbatim_item();
5238 string const opt1 = p.getFullOpt();
5239 string const opt2 = p.getFullOpt();
5240 add_known_command(command, opt1, !opt2.empty());
5241 string const ert = name + '{' + command + '}' +
5243 '{' + p.verbatim_item() + '}';
5245 if (t.cs() == "DeclareRobustCommand" ||
5246 t.cs() == "DeclareRobustCommandx" ||
5247 t.cs() == "providecommand" ||
5248 t.cs() == "providecommandx" ||
5249 name[name.length()-1] == '*')
5250 output_ert_inset(os, ert, context);
5252 context.check_layout(os);
5253 begin_inset(os, "FormulaMacro");
5260 if (t.cs() == "let" && p.next_token().asInput() != "*") {
5261 // let could be handled by parse_command(),
5262 // but we need to call add_known_command() here.
5263 string ert = t.asInput();
5266 if (p.next_token().cat() == catBegin) {
5267 name = p.verbatim_item();
5268 ert += '{' + name + '}';
5270 name = p.verbatim_item();
5275 if (p.next_token().cat() == catBegin) {
5276 command = p.verbatim_item();
5277 ert += '{' + command + '}';
5279 command = p.verbatim_item();
5282 // If command is known, make name known too, to parse
5283 // its arguments correctly. For this reason we also
5284 // have commands in syntax.default that are hardcoded.
5285 CommandMap::iterator it = known_commands.find(command);
5286 if (it != known_commands.end())
5287 known_commands[t.asInput()] = it->second;
5288 output_ert_inset(os, ert, context);
5292 if (t.cs() == "hspace" || t.cs() == "vspace") {
5295 string name = t.asInput();
5296 string const length = p.verbatim_item();
5299 bool valid = splitLatexLength(length, valstring, unit);
5300 bool known_hspace = false;
5301 bool known_vspace = false;
5302 bool known_unit = false;
5305 istringstream iss(valstring);
5308 if (t.cs()[0] == 'h') {
5309 if (unit == "\\fill") {
5314 known_hspace = true;
5317 if (unit == "\\smallskipamount") {
5319 known_vspace = true;
5320 } else if (unit == "\\medskipamount") {
5322 known_vspace = true;
5323 } else if (unit == "\\bigskipamount") {
5325 known_vspace = true;
5326 } else if (unit == "\\fill") {
5328 known_vspace = true;
5332 if (!known_hspace && !known_vspace) {
5333 switch (unitFromString(unit)) {
5349 //unitFromString(unit) fails for relative units like Length::PCW
5350 // therefore handle them separately
5351 if (unit == "\\paperwidth" || unit == "\\columnwidth"
5352 || unit == "\\textwidth" || unit == "\\linewidth"
5353 || unit == "\\textheight" || unit == "\\paperheight"
5354 || unit == "\\baselineskip")
5362 // check for glue lengths
5363 bool is_gluelength = false;
5364 string gluelength = length;
5365 string::size_type i = length.find(" minus");
5366 if (i == string::npos) {
5367 i = length.find(" plus");
5368 if (i != string::npos)
5369 is_gluelength = true;
5371 is_gluelength = true;
5372 // if yes transform "9xx minus 8yy plus 7zz"
5374 if (is_gluelength) {
5375 i = gluelength.find(" minus");
5376 if (i != string::npos)
// Normalise a LaTeX glue specification: the stretch/shrink keywords are
// rewritten to LyX's compact notation (" plus" -> "+"; the 7-char replace
// just below presumably handles " minus" -> "-", its find() is elided here).
5377 gluelength.replace(i, 7, "-");
5378 i = gluelength.find(" plus");
5379 if (i != string::npos)
5380 gluelength.replace(i, 6, "+");
// Commands starting with 'h' (horizontal spaces) become a "space" inset
// when the argument is a literal length, a known hspace macro, or glue.
5383 if (t.cs()[0] == 'h' && (known_unit || known_hspace || is_gluelength)) {
5384 // Literal horizontal length or known variable
5385 context.check_layout(os);
5386 begin_inset(os, "space ");
// Literal lengths are converted to LyX syntax via translate_len();
// glue lengths were already normalised above.
5394 if (known_unit && !known_hspace)
5395 os << "\n\\length " << translate_len(length);
5397 os << "\n\\length " << gluelength;
// Vertical counterpart: the command becomes a VSpace inset.
5399 } else if (known_unit || known_vspace || is_gluelength) {
5400 // Literal vertical length or known variable
5401 context.check_layout(os);
5402 begin_inset(os, "VSpace ");
5405 if (known_unit && !known_vspace)
5406 os << translate_len(length);
// Fallback: emit the original command as ERT.  The value == -1.0 case
// reconstructs a negative sign in front of the unit variable.
5413 // LyX can't handle other length variables in Inset VSpace/space
5418 output_ert_inset(os, name + '{' + unit + '}', context);
5419 else if (value == -1.0)
5420 output_ert_inset(os, name + "{-" + unit + '}', context);
5422 output_ert_inset(os, name + '{' + valstring + unit + '}', context);
5424 output_ert_inset(os, name + '{' + length + '}', context);
// First inset-layout lookup: take the command's LaTeX parameter
// (options or a star) into account when matching the layout name.
5429 // Before we look for the layout name alone below, we check the layouts including the LateXParam, which
5430 // might be one or several options or a star.
5431 // The single '=' is meant here.
5432 if ((newinsetlayout = findInsetLayout(context.textclass, starredname, true, p.getCommandLatexParam()))) {
5436 context.check_layout(os);
5437 // store the latexparam here. This is eaten in parse_text_in_inset
5438 context.latexparam = newinsetlayout->latexparam();
5439 docstring name = newinsetlayout->name();
5440 bool const caption = name.find(from_ascii("Caption:")) == 0;
// "Caption:<type>" layouts open a Caption inset (substr(8) strips the
// "Caption:" prefix); other layouts are emitted as Flex insets below.
5442 // Already done for floating minted listings.
5443 if (minted_float.empty()) {
5444 begin_inset(os, "Caption ");
5445 os << to_utf8(name.substr(8)) << '\n';
5448 // FIXME: what do we do if the prefix is not Flex: ?
5449 if (prefixIs(name, from_ascii("Flex:")))
5451 begin_inset(os, "Flex ");
5452 os << to_utf8(name) << '\n'
5453 << "status collapsed\n";
// Pass-through insets take their content verbatim (output as ERT);
// everything else is parsed recursively via parse_text_in_inset().
5455 if (!minted_float.empty()) {
5456 parse_text_snippet(p, os, FLAG_ITEM, false, context);
5457 } else if (newinsetlayout->isPassThru()) {
5458 // set catcodes to verbatim early, just in case.
5459 p.setCatcodes(VERBATIM_CATCODES);
5460 string delim = p.get_token().asInput();
5462 cerr << "Warning: bad delimiter for command " << t.asInput() << endl;
5463 //FIXME: handle error condition
5464 string const arg = p.verbatimStuff("}").second;
5465 Context newcontext(true, context.textclass);
5466 if (newinsetlayout->forcePlainLayout())
5467 newcontext.layout = &context.textclass.plainLayout();
5468 output_ert(os, arg, newcontext);
5470 parse_text_in_inset(p, os, FLAG_ITEM, false, context, newinsetlayout);
5471 context.latexparam.clear();
5474 // Minted caption insets are not closed here because
5475 // we collect everything into the caption.
5476 if (minted_float.empty())
// Second inset-layout lookup: by name alone, without the LaTeX
// parameter.  This branch mirrors the parameterised one above.
5481 // The single '=' is meant here.
5482 if ((newinsetlayout = findInsetLayout(context.textclass, starredname, true))) {
5486 context.check_layout(os);
5487 docstring const name = newinsetlayout->name();
5488 bool const caption = name.find(from_ascii("Caption:")) == 0;
5490 // Already done for floating minted listings.
5491 if (minted_float.empty()) {
5492 begin_inset(os, "Caption ");
5493 os << to_utf8(name.substr(8)) << '\n';
5496 begin_inset(os, "Flex ");
5497 os << to_utf8(name) << '\n'
5498 << "status collapsed\n";
5500 if (!minted_float.empty()) {
5501 parse_text_snippet(p, os, FLAG_ITEM, false, context);
5502 } else if (newinsetlayout->isPassThru()) {
5503 // set catcodes to verbatim early, just in case.
5504 p.setCatcodes(VERBATIM_CATCODES);
5505 string delim = p.get_token().asInput();
5507 cerr << "Warning: bad delimiter for command " << t.asInput() << endl;
5508 //FIXME: handle error condition
5509 string const arg = p.verbatimStuff("}").second;
5510 Context newcontext(true, context.textclass);
5511 if (newinsetlayout->forcePlainLayout())
5512 newcontext.layout = &context.textclass.plainLayout();
5513 output_ert(os, arg, newcontext);
5515 parse_text_in_inset(p, os, FLAG_ITEM, false, context, newinsetlayout);
5518 // Minted caption insets are not closed here because
5519 // we collect everything into the caption.
5520 if (minted_float.empty())
// \includepdf (pdfpages package) -> External inset using the PDFPages
// template; the key=value options are translated to template parameters.
5525 if (t.cs() == "includepdf") {
5527 string const arg = p.getArg('[', ']');
5528 map<string, string> opts;
5529 vector<string> keys;
5530 split_map(arg, opts, keys);
5531 string name = normalize_filename(p.verbatim_item());
5532 string const path = getMasterFilePath(true);
5533 // We want to preserve relative / absolute filenames,
5534 // therefore path is only used for testing
5535 if (!makeAbsPath(name, path).exists()) {
5536 // The file extension is probably missing.
5537 // Now try to find it out.
5538 char const * const pdfpages_format[] = {"pdf", 0};
5539 string const pdftex_name =
5540 find_file(name, path, pdfpages_format);
5541 if (!pdftex_name.empty()) {
// If the (possibly completed) file exists, copy it next to the output
// document so the relative filename keeps working.
5546 FileName const absname = makeAbsPath(name, path);
5547 if (absname.exists())
5549 fix_child_filename(name);
5550 copy_file(absname, name);
5552 cerr << "Warning: Could not find file '"
5553 << name << "'." << endl;
5555 context.check_layout(os);
5556 begin_inset(os, "External\n\ttemplate ");
5557 os << "PDFPages\n\tfilename "
5559 // parse the options
5560 if (opts.find("pages") != opts.end())
5561 os << "\textra LaTeX \"pages="
5562 << opts["pages"] << "\"\n";
5563 if (opts.find("angle") != opts.end())
5564 os << "\trotateAngle "
5565 << opts["angle"] << '\n';
5566 if (opts.find("origin") != opts.end()) {
// Map pdfpages' two-letter origin codes (top/bottom/baseline x
// left/center/right) to the template's rotateOrigin keywords.
5568 string const opt = opts["origin"];
5569 if (opt == "tl") ss << "topleft";
5570 if (opt == "bl") ss << "bottomleft";
5571 if (opt == "Bl") ss << "baselineleft";
5572 if (opt == "c") ss << "center";
5573 if (opt == "tc") ss << "topcenter";
5574 if (opt == "bc") ss << "bottomcenter";
5575 if (opt == "Bc") ss << "baselinecenter";
5576 if (opt == "tr") ss << "topright";
5577 if (opt == "br") ss << "bottomright";
5578 if (opt == "Br") ss << "baselineright";
5579 if (!ss.str().empty())
5580 os << "\trotateOrigin " << ss.str() << '\n';
5582 cerr << "Warning: Ignoring unknown includegraphics origin argument '" << opt << "'\n";
5584 if (opts.find("width") != opts.end())
5586 << translate_len(opts["width"]) << '\n';
5587 if (opts.find("height") != opts.end())
5589 << translate_len(opts["height"]) << '\n';
5590 if (opts.find("keepaspectratio") != opts.end())
5591 os << "\tkeepAspectRatio\n";
5593 context.check_layout(os);
5594 registerExternalTemplatePackages("PDFPages");
// \loadgame (chess, lyxskak) -> External inset using the ChessDiagram
// template.  Same find/copy-file logic as \includepdf, for ".fen" files.
5598 if (t.cs() == "loadgame") {
5600 string name = normalize_filename(p.verbatim_item());
5601 string const path = getMasterFilePath(true);
5602 // We want to preserve relative / absolute filenames,
5603 // therefore path is only used for testing
5604 if (!makeAbsPath(name, path).exists()) {
5605 // The file extension is probably missing.
5606 // Now try to find it out.
5607 char const * const lyxskak_format[] = {"fen", 0};
5608 string const lyxskak_name =
5609 find_file(name, path, lyxskak_format);
5610 if (!lyxskak_name.empty())
5611 name = lyxskak_name;
5613 FileName const absname = makeAbsPath(name, path);
5614 if (absname.exists())
5616 fix_child_filename(name);
5617 copy_file(absname, name);
5619 cerr << "Warning: Could not find file '"
5620 << name << "'." << endl;
5621 context.check_layout(os);
5622 begin_inset(os, "External\n\ttemplate ");
5623 os << "ChessDiagram\n\tfilename "
5626 context.check_layout(os);
5627 // after a \loadgame follows a \showboard
5628 if (p.get_token().asInput() == "showboard")
5630 registerExternalTemplatePackages("ChessDiagram");
// Last resort for unknown commands: try to turn them into a Unicode
// character via the unicodesymbols table (fromLaTeXCommand()).
5634 // try to see whether the string is in unicodesymbols
5635 // Only use text mode commands, since we are in text mode here,
5636 // and math commands may be invalid (bug 6797)
5637 string name = t.asInput();
5638 // handle the dingbats, cyrillic and greek
5639 if (name == "\\ding" || name == "\\textcyr" ||
5640 (name == "\\textgreek" && !preamble.usePolyglossia()))
5641 name = name + '{' + p.getArg('{', '}') + '}';
5642 // handle the ifsym characters
5643 else if (name == "\\textifsymbol") {
5644 string const optif = p.getFullOpt();
5645 string const argif = p.getArg('{', '}');
5646 name = name + optif + '{' + argif + '}';
5648 // handle the \ascii characters
5649 // the case of \ascii within braces, as LyX outputs it, is already
5650 // handled for t.cat() == catBegin
5651 else if (name == "\\ascii") {
5652 // the code is "\asci\xxx"
5653 name = "{" + name + p.get_token().asInput() + "}";
5656 // handle some TIPA special characters
5657 else if (preamble.isPackageUsed("tipa")) {
5658 if (name == "\\s") {
5659 // fromLaTeXCommand() does not yet
5660 // recognize tipa short cuts
5661 name = "\\textsyllabic";
5662 } else if (name == "\\=" &&
5663 p.next_token().asInput() == "*") {
5664 // fromLaTeXCommand() does not yet
5665 // recognize tipa short cuts
5667 name = "\\textsubbar";
5668 } else if (name == "\\textdoublevertline") {
5669 // FIXME: This is not correct,
5670 // \textvertline is higher than \textbardbl
5671 name = "\\textbardbl";
// TIPA "\!x" shortcuts: the following letter selects the replacement
// text command (the b/d/g branches' assignments are elided here).
5673 } else if (name == "\\!" ) {
5674 if (p.next_token().asInput() == "b") {
5675 p.get_token(); // eat 'b'
5678 } else if (p.next_token().asInput() == "d") {
5682 } else if (p.next_token().asInput() == "g") {
5686 } else if (p.next_token().asInput() == "G") {
5688 name = "\\texthtscg";
5690 } else if (p.next_token().asInput() == "j") {
5692 name = "\\texthtbardotlessj";
5694 } else if (p.next_token().asInput() == "o") {
5696 name = "\\textbullseye";
// TIPA "\*x" shortcuts for turned letters.
5699 } else if (name == "\\*" ) {
5700 if (p.next_token().asInput() == "k") {
5702 name = "\\textturnk";
5704 } else if (p.next_token().asInput() == "r") {
5705 p.get_token(); // eat 'b'
5706 name = "\\textturnr";
5708 } else if (p.next_token().asInput() == "t") {
5710 name = "\\textturnt";
5712 } else if (p.next_token().asInput() == "w") {
5714 name = "\\textturnw";
// Accent-like commands and TIPA marks keep their argument attached so
// fromLaTeXCommand() can produce a combining sequence.
5719 if ((name.size() == 2 &&
5720 contains("\"'.=^`bcdHkrtuv~", name[1]) &&
5721 p.next_token().asInput() != "*") ||
5722 is_known(name.substr(1), known_tipa_marks)) {
5723 // name is a command that corresponds to a
5724 // combining character in unicodesymbols.
5725 // Append the argument, fromLaTeXCommand()
5726 // will either convert it to a single
5727 // character or a combining sequence.
5728 name += '{' + p.verbatim_item() + '}';
5730 // now get the character from unicodesymbols
// Any unconverted remainder ("rem") is emitted as ERT, and packages
// required for the symbol ("req") are registered with the preamble.
5734 docstring s = normalize_c(encodings.fromLaTeXCommand(from_utf8(name),
5735 Encodings::TEXT_CMD, termination, rem, &req));
5737 context.check_layout(os);
5740 output_ert_inset(os, to_utf8(rem), context);
5742 skip_spaces_braces(p);
5743 for (set<string>::const_iterator it = req.begin(); it != req.end(); ++it)
5744 preamble.registerAutomaticallyLoadedPackage(*it);
5746 //cerr << "#: " << t << " mode: " << mode << endl;
5747 // heuristic: read up to next non-nested space
// Completely unknown construct: slurp items up to the next top-level
// space and dump everything as a single ERT inset.
5749 string s = t.asInput();
5750 string z = p.verbatim_item();
5751 while (p.good() && z != " " && !z.empty()) {
5752 //cerr << "read: " << z << endl;
5754 z = p.verbatim_item();
5756 cerr << "found ERT: " << s << endl;
5757 output_ert_inset(os, s + ' ', context);
// Starred commands (e.g. \vspace*) consume the '*' here; otherwise the
// generic parse_command() is tried, with plain ERT as the last fallback.
5760 if (t.asInput() == name &&
5761 p.next_token().asInput() == "*") {
5762 // Starred commands like \vspace*{}
5763 p.get_token(); // Eat '*'
5766 if (!parse_command(name, p, os, outer, context))
5767 output_ert_inset(os, name, context);
// Guess the dominant language of the upcoming input by counting
// characters per language: non-CJK text is credited to "lang" (the
// babel main language), while the contents of \begin{CJK} environments
// are credited to the CJK language matching the environment's encoding.
// NOTE(review): the winner's return statement lies below this chunk;
// the loop at the end only locates the most-used map entry.
5773 string guessLanguage(Parser & p, string const & lang)
5775 typedef std::map<std::string, size_t> LangMap;
5776 // map from language names to number of characters
// Seed a zero counter for every supported CJK language.
5779 for (char const * const * i = supported_CJK_languages; *i; i++)
5780 used[string(*i)] = 0;
5783 Token const t = p.get_token();
5784 // comments are not counted for any language
5785 if (t.cat() == catComment)
5787 // commands are not counted as well, but we need to detect
5788 // \begin{CJK} and switch encoding if needed
5789 if (t.cat() == catEscape) {
// \inputencoding changes the parser's encoding on the fly.
5790 if (t.cs() == "inputencoding") {
5791 string const enc = subst(p.verbatim_item(), "\n", " ");
5792 p.setEncoding(enc, Encoding::inputenc);
5795 if (t.cs() != "begin")
5798 // Non-CJK content is counted for lang.
5799 // We do not care about the real language here:
5800 // If we have more non-CJK contents than CJK contents,
5801 // we simply use the language that was specified as
5802 // babel main language.
5803 used[lang] += t.asInput().length();
5806 // Now we are starting an environment
5808 string const name = p.getArg('{', '}');
5809 if (name != "CJK") {
5813 // It is a CJK environment
// \begin{CJK}{<encoding>}{<mapping>}: only the encoding argument is
// used; it selects both the parser encoding and the language that the
// environment's contents are counted for.
5815 /* name = */ p.getArg('{', '}');
5816 string const encoding = p.getArg('{', '}');
5817 /* mapping = */ p.getArg('{', '}');
5818 string const encoding_old = p.getEncoding();
5819 char const * const * const where =
5820 is_known(encoding, supported_CJK_encodings);
5822 p.setEncoding(encoding, Encoding::CJK);
5824 p.setEncoding("UTF-8");
// Read the whole environment verbatim, then restore the old encoding.
5825 string const text = p.ertEnvironment("CJK");
5826 p.setEncoding(encoding_old);
5829 // ignore contents in unknown CJK encoding
// supported_CJK_languages is parallel to supported_CJK_encodings, so
// the encoding's index directly yields the language name.
5832 // the language of the text
5834 supported_CJK_languages[where - supported_CJK_encodings];
5835 used[cjk] += text.length();
// Find the entry with the highest character count.
5837 LangMap::const_iterator use = used.begin();
5838 for (LangMap::const_iterator it = used.begin(); it != used.end(); ++it) {
5839 if (it->second > use->second)
// Emit the biblatex bibliography databases as a bibtex command inset
// wrapped inside a LyX note, for documents that provide databases but
// print no bibliography.  Gated on the need_commentbib flag (declared
// elsewhere in this file).
5846 void check_comment_bib(ostream & os, Context & context)
5848 if (!need_commentbib)
5850 // We have a bibliography database, but no bibliography with biblatex
5851 // which is completely valid. Insert a bibtex inset in a note.
5852 context.check_layout(os);
5853 begin_inset(os, "Note Note\n");
5854 os << "status open\n";
5855 os << "\\begin_layout Plain Layout\n";
5856 begin_command_inset(os, "bibtex", "bibtex");
// Join all database names into one "bibfiles" string; a separator is
// appended between entries (the exact separator line is elided here).
5858 for (auto const & bf : preamble.biblatex_bibliographies) {
5859 if (!bibfiles.empty())
5861 bibfiles += normalize_filename(bf);
5863 if (!bibfiles.empty())
5864 os << "bibfiles " << '"' << bibfiles << '"' << "\n";
5865 end_inset(os);// Bibtex
5866 os << "\\end_layout\n";
5867 end_inset(os);// Note