2 * \file tex2lyx/text.cpp
3 * This file is part of LyX, the document processor.
4 * Licence details can be found in the file COPYING.
7 * \author Jean-Marc Lasgouttes
10 * Full author contact details are available in file CREDITS.
21 #include "FloatList.h"
22 #include "LaTeXPackages.h"
27 #include "insets/ExternalTemplate.h"
29 #include "support/lassert.h"
30 #include "support/convert.h"
31 #include "support/FileName.h"
32 #include "support/filetools.h"
33 #include "support/lstrings.h"
34 #include "support/lyxtime.h"
43 using namespace lyx::support;
// Forward declaration; the definition appears further down in this file.
50 void output_arguments(ostream &, Parser &, bool, bool, bool, Context &,
51 	Layout::LaTeXArgMap const &);
// Parse the contents of an inset into \p os. A fresh Context is created;
// when the inset layout forces the plain layout, the new context uses the
// text class's plain layout. Pre-/post-command arguments are emitted around
// the main parse_text() call.
// NOTE(review): layout is null-checked at its first use (forcePlainLayout),
// but the postcommandargs() call below appears unguarded in this excerpt —
// confirm a null check guards it.
56 void parse_text_in_inset(Parser & p, ostream & os, unsigned flags, bool outer,
57 		Context const & context, InsetLayout const * layout)
59 	bool const forcePlainLayout =
60 		layout ? layout->forcePlainLayout() : false;
61 	Context newcontext(true, context.textclass);
63 		newcontext.layout = &context.textclass.plainLayout();
	// Inherit the font from the surrounding context.
65 	newcontext.font = context.font;
67 		output_arguments(os, p, outer, false, false, newcontext,
69 	parse_text(p, os, flags, outer, newcontext);
71 		output_arguments(os, p, outer, false, true, newcontext,
72 				layout->postcommandargs());
73 	newcontext.check_end_layout(os);
// Overload: look up the inset layout by \p name in the text class and
// delegate to the pointer-based overload. If the name is unknown, a null
// layout is passed on (the callee handles that case).
79 void parse_text_in_inset(Parser & p, ostream & os, unsigned flags, bool outer,
80 		Context const & context, string const & name)
82 	InsetLayout const * layout = 0;
83 	DocumentClass::InsetLayouts::const_iterator it =
84 		context.textclass.insetLayouts().find(from_ascii(name));
85 	if (it != context.textclass.insetLayouts().end())
86 		layout = &(it->second);
87 	parse_text_in_inset(p, os, flags, outer, context, layout);
90 /// parses a paragraph snippet, useful for example for \\emph{...}
91 void parse_text_snippet(Parser & p, ostream & os, unsigned flags, bool outer,
94 	Context newcontext(context);
95 	// Don't inherit the paragraph-level extra stuff
96 	newcontext.par_extra_stuff.clear();
97 	parse_text(p, os, flags, outer, newcontext);
98 	// Make sure that we don't create invalid .lyx files
	// (the layout state observed inside the snippet is copied back so the
	// caller's context stays consistent with what was written to os).
99 	context.need_layout = newcontext.need_layout;
100 	context.need_end_layout = newcontext.need_end_layout;
105  * Thin wrapper around parse_text_snippet() using a string.
107  * We completely ignore \c context.need_layout and \c context.need_end_layout,
108  * because our return value is not used directly (otherwise the stream version
109  * of parse_text_snippet() could be used). That means that the caller needs
110  * to do layout management manually.
111  * This is intended to parse text that does not create any layout changes.
113 string parse_text_snippet(Parser & p, unsigned flags, const bool outer,
116 	Context newcontext(context);
	// Disable all layout handling: the result is raw snippet text.
117 	newcontext.need_layout = false;
118 	newcontext.need_end_layout = false;
119 	newcontext.new_layout_allowed = false;
120 	// Avoid warning by Context::~Context()
121 	newcontext.par_extra_stuff.clear();
123 	parse_text_snippet(p, os, flags, outer, newcontext);
/// LaTeX cross-reference commands recognized by tex2lyx (null-terminated).
128 char const * const known_ref_commands[] = { "ref", "pageref", "vref",
129 	"vpageref", "prettyref", "nameref", "eqref", 0 };
/// The LyX-coded names for known_ref_commands; the two arrays are
/// index-aligned ("prettyref" maps to "formatted") — keep them in sync.
131 char const * const known_coded_ref_commands[] = { "ref", "pageref", "vref",
132 	"vpageref", "formatted", "nameref", "eqref", 0 };
/// refstyle reference commands; index-aligned with known_refstyle_prefixes
/// below — keep both lists in sync.
134 char const * const known_refstyle_commands[] = { "algref", "chapref", "corref",
135 	"eqref", "enuref", "figref", "fnref", "lemref", "parref", "partref", "propref",
136 	"secref", "subref", "tabref", "thmref", 0 };
/// Label prefixes corresponding to known_refstyle_commands (e.g. "eq" for
/// "eqref").
138 char const * const known_refstyle_prefixes[] = { "alg", "chap", "cor",
139 	"eq", "enu", "fig", "fn", "lem", "par", "part", "prop",
140 	"sec", "sub", "tab", "thm", 0 };
144  * supported CJK encodings
145  * JIS does not work with LyX's encoding conversion
147 const char * const supported_CJK_encodings[] = {
148 	"EUC-JP", "KS", "GB", "UTF8",
149 	"Bg5", /*"JIS",*/ "SJIS", 0};
152  * the same as supported_CJK_encodings with their corresponding LyX language name
153  * FIXME: The mapping "UTF8" => "chinese-traditional" is only correct for files
155  * NOTE: "Bg5", "JIS" and "SJIS" are not supported by LyX, on re-export the
156  * encodings "UTF8", "EUC-JP" and "EUC-JP" will be used.
157  * please keep this in sync with supported_CJK_encodings line by line!
159 const char * const supported_CJK_languages[] = {
160 	"japanese-cjk", "korean", "chinese-simplified", "chinese-traditional",
161 	"chinese-traditional", /*"japanese-cjk",*/ "japanese-cjk", 0};
/// natbib citation commands recognized by tex2lyx.
165  * The starred forms are also known except for "citefullauthor",
166  * "citeyear" and "citeyearpar".
168 char const * const known_natbib_commands[] = { "cite", "citet", "citep",
169 	"citealt", "citealp", "citeauthor", "citeyear", "citeyearpar",
170 	"citefullauthor", "Citet", "Citep", "Citealt", "Citealp", "Citeauthor", 0 };
/// jurabib citation commands recognized by tex2lyx.
174  * No starred form other than "cite*" known.
176 char const * const known_jurabib_commands[] = { "cite", "citet", "citep",
177 	"citealt", "citealp", "citeauthor", "citeyear", "citeyearpar",
178 	// jurabib commands not (yet) supported by LyX:
180 	// "footcite", "footcitet", "footcitep", "footcitealt", "footcitealp",
181 	// "footciteauthor", "footciteyear", "footciteyearpar",
182 	"citefield", "citetitle", 0 };
184 /// LaTeX names for quotes
185 char const * const known_quotes[] = { "dq", "guillemotleft", "flqq", "og",
186 	"guillemotright", "frqq", "fg", "glq", "glqq", "textquoteleft", "grq", "grqq",
187 	"quotedblbase", "textquotedblleft", "quotesinglbase", "textquoteright", "flq",
188 	"guilsinglleft", "frq", "guilsinglright", 0};
190 /// the same as known_quotes with .lyx names
// (index-aligned with known_quotes — keep both arrays in sync)
191 char const * const known_coded_quotes[] = { "prd", "ard", "ard", "ard",
192 	"ald", "ald", "ald", "gls", "gld", "els", "els", "grd",
193 	"gld", "grd", "gls", "ers", "fls",
194 	"fls", "frs", "frs", 0};
196 /// LaTeX names for font sizes
197 char const * const known_sizes[] = { "tiny", "scriptsize", "footnotesize",
198 	"small", "normalsize", "large", "Large", "LARGE", "huge", "Huge", 0};
200 /// the same as known_sizes with .lyx names
// (index-aligned with known_sizes — keep both arrays in sync)
201 char const * const known_coded_sizes[] = { "tiny", "scriptsize", "footnotesize",
202 	"small", "normal", "large", "larger", "largest", "huge", "giant", 0};
204 /// LaTeX 2.09 names for font families
205 char const * const known_old_font_families[] = { "rm", "sf", "tt", 0};
207 /// LaTeX names for font families
208 char const * const known_font_families[] = { "rmfamily", "sffamily",
211 /// LaTeX names for font family changing commands
212 char const * const known_text_font_families[] = { "textrm", "textsf",
215 /// The same as known_old_font_families, known_font_families and
216 /// known_text_font_families with .lyx names
217 char const * const known_coded_font_families[] = { "roman", "sans",
220 /// LaTeX 2.09 names for font series
221 char const * const known_old_font_series[] = { "bf", 0};
223 /// LaTeX names for font series
224 char const * const known_font_series[] = { "bfseries", "mdseries", 0};
226 /// LaTeX names for font series changing commands
227 char const * const known_text_font_series[] = { "textbf", "textmd", 0};
229 /// The same as known_old_font_series, known_font_series and
230 /// known_text_font_series with .lyx names
231 char const * const known_coded_font_series[] = { "bold", "medium", 0};
233 /// LaTeX 2.09 names for font shapes
234 char const * const known_old_font_shapes[] = { "it", "sl", "sc", 0};
236 /// LaTeX names for font shapes
237 char const * const known_font_shapes[] = { "itshape", "slshape", "scshape",
240 /// LaTeX names for font shape changing commands
241 char const * const known_text_font_shapes[] = { "textit", "textsl", "textsc",
244 /// The same as known_old_font_shapes, known_font_shapes and
245 /// known_text_font_shapes with .lyx names
// (all of these family/series/shape tables are index-aligned across the
//  old/new/text/coded variants — keep them in sync)
246 char const * const known_coded_font_shapes[] = { "italic", "slanted",
247 	"smallcaps", "up", 0};
249 /// Known special characters which need skip_spaces_braces() afterwards
250 char const * const known_special_chars[] = {"ldots",
251 	"lyxarrow", "textcompwordmark",
252 	"slash", "textasciitilde", "textasciicircum", "textbackslash",
253 	"LyX", "TeX", "LaTeXe",
256 /// special characters from known_special_chars which may have a \\protect before
257 char const * const known_special_protect_chars[] = {"LyX", "TeX",
258 	"LaTeXe", "LaTeX", 0};
260 /// the same as known_special_chars with .lyx names
// (index-aligned with known_special_chars — keep both arrays in sync)
261 char const * const known_coded_special_chars[] = {"\\SpecialChar ldots\n",
262 	"\\SpecialChar menuseparator\n", "\\SpecialChar ligaturebreak\n",
263 	"\\SpecialChar breakableslash\n", "~", "^", "\n\\backslash\n",
264 	"\\SpecialChar LyX\n", "\\SpecialChar TeX\n", "\\SpecialChar LaTeX2e\n",
265 	"\\SpecialChar LaTeX\n", 0};
268  * Graphics file extensions known by the dvips driver of the graphics package.
269  * These extensions are used to complete the filename of an included
270  * graphics file if it does not contain an extension.
271  * The order must be the same that latex uses to find a file, because we
272  * will use the first extension that matches.
273  * This is only an approximation for the common cases. If we would want to
274  * do it right in all cases, we would need to know which graphics driver is
275  * used and know the extensions of every driver of the graphics package.
277 char const * const known_dvips_graphics_formats[] = {"eps", "ps", "eps.gz",
278 	"ps.gz", "eps.Z", "ps.Z", 0};
281  * Graphics file extensions known by the pdftex driver of the graphics package.
282  * \sa known_dvips_graphics_formats
284 char const * const known_pdftex_graphics_formats[] = {"png", "pdf", "jpg",
288  * Known file extensions for TeX files as used by \\include.
290 char const * const known_tex_extensions[] = {"tex", 0};
292 /// spaces known by InsetSpace
293 char const * const known_spaces[] = { " ", "space", ",",
294 	"thinspace", "quad", "qquad", "enspace", "enskip",
295 	"negthinspace", "negmedspace", "negthickspace", "textvisiblespace",
296 	"hfill", "dotfill", "hrulefill", "leftarrowfill", "rightarrowfill",
297 	"upbracefill", "downbracefill", 0};
299 /// the same as known_spaces with .lyx names
// (index-aligned with known_spaces — keep both arrays in sync)
300 char const * const known_coded_spaces[] = { "space{}", "space{}",
301 	"thinspace{}", "thinspace{}", "quad{}", "qquad{}", "enspace{}", "enskip{}",
302 	"negthinspace{}", "negmedspace{}", "negthickspace{}", "textvisiblespace{}",
303 	"hfill{}", "dotfill{}", "hrulefill{}", "leftarrowfill{}", "rightarrowfill{}",
304 	"upbracefill{}", "downbracefill{}", 0};
306 /// known TIPA combining diacritical marks
307 char const * const known_tipa_marks[] = {"textsubwedge", "textsubumlaut",
308 	"textsubtilde", "textseagull", "textsubbridge", "textinvsubbridge",
309 	"textsubsquare", "textsubrhalfring", "textsublhalfring", "textsubplus",
310 	"textovercross", "textsubarch", "textsuperimposetilde", "textraising",
311 	"textlowering", "textadvancing", "textretracting", "textdoublegrave",
312 	"texthighrise", "textlowrise", "textrisefall", "textsyllabic",
315 /// TIPA tones that need special handling
316 char const * const known_tones[] = {"15", "51", "45", "12", "454", 0};
318 // string to store the float type to be able to determine the type of subfloats
// (file-level mutable state shared by the float/subfloat parsing code)
319 string float_type = "";
322 /// splits "x=z, y=b" into a map and an ordered keyword vector
// NOTE(review): if an entry contains no '=', pos is string::npos and
// pos + 1 wraps to 0, so value would be the whole entry — confirm the
// input is always well-formed key=value pairs.
323 void split_map(string const & s, map<string, string> & res, vector<string> & keys)
328 	keys.resize(v.size());
329 	for (size_t i = 0; i < v.size(); ++i) {
330 		size_t const pos = v[i].find('=');
331 		string const index = trimSpaceAndEol(v[i].substr(0, pos));
332 		string const value = trimSpaceAndEol(v[i].substr(pos + 1, string::npos));
340  * Split a LaTeX length into value and unit.
341  * The latter can be a real unit like "pt", or a latex length variable
342  * like "\textwidth". The unit may contain additional stuff like glue
343  * lengths, but we don't care, because such lengths are ERT anyway.
344  * \returns true if \p value and \p unit are valid.
346 bool splitLatexLength(string const & len, string & value, string & unit)
	// i = first char that is not part of the numeric factor
350 	const string::size_type i = len.find_first_not_of(" -+0123456789.,");
351 	//'4,5' is a valid LaTeX length number. Change it to '4.5'
352 	string const length = subst(len, ',', '.');
353 	if (i == string::npos)
356 	if (len[0] == '\\') {
357 		// We had something like \textwidth without a factor
	// value comes from the comma-normalized copy, unit from the original
363 	value = trimSpaceAndEol(string(length, 0, i));
367 	// 'cM' is a valid LaTeX length unit. Change it to 'cm'
	// (but keep the case of length macros such as \textwidth)
368 	if (contains(len, '\\'))
369 		unit = trimSpaceAndEol(string(len, i));
371 		unit = ascii_lowercase(trimSpaceAndEol(string(len, i)));
376 /// A simple function to translate a latex length to something LyX can
377 /// understand. Not perfect, but rather best-effort.
// Lengths given relative to a LaTeX length macro (\textwidth etc.) are
// converted to LyX's percent units (text%, col%, page%, ...).
378 bool translate_len(string const & length, string & valstring, string & unit)
380 	if (!splitLatexLength(length, valstring, unit))
382 	// LyX uses percent values
384 	istringstream iss(valstring);
389 	string const percentval = oss.str();
	// Plain units (pt, cm, ...) are passed through unchanged.
391 	if (unit.empty() || unit[0] != '\\')
	// Anything after the macro name (e.g. glue) is kept as a suffix.
393 	string::size_type const i = unit.find(' ');
394 	string const endlen = (i == string::npos) ? string() : string(unit, i);
395 	if (unit == "\\textwidth") {
396 		valstring = percentval;
397 		unit = "text%" + endlen;
398 	} else if (unit == "\\columnwidth") {
399 		valstring = percentval;
400 		unit = "col%" + endlen;
401 	} else if (unit == "\\paperwidth") {
402 		valstring = percentval;
403 		unit = "page%" + endlen;
404 	} else if (unit == "\\linewidth") {
405 		valstring = percentval;
406 		unit = "line%" + endlen;
407 	} else if (unit == "\\paperheight") {
408 		valstring = percentval;
409 		unit = "pheight%" + endlen;
410 	} else if (unit == "\\textheight") {
411 		valstring = percentval;
412 		unit = "theight%" + endlen;
// Convenience overload returning the translated length as a single string.
420 string translate_len(string const & length)
424 	if (translate_len(length, value, unit))
426 	// If the input is invalid, return what we have.
434  * Translates a LaTeX length into \p value, \p unit and
435  * \p special parts suitable for a box inset.
436  * The difference from translate_len() is that a box inset knows about
437  * some special "units" that are stored in \p special.
439 void translate_box_len(string const & length, string & value, string & unit, string & special)
441 	if (translate_len(length, value, unit)) {
442 		if (unit == "\\height" || unit == "\\depth" ||
443 		    unit == "\\totalheight" || unit == "\\width") {
			// strip the leading backslash: "height", "width", ...
444 			special = unit.substr(1);
445 			// The unit is not used, but LyX requires a dummy setting
458  * Find a file with basename \p name in path \p path and an extension
// from the null-terminated \p extensions list; the first match (in list
// order) wins.
461 string find_file(string const & name, string const & path,
462 		 char const * const * extensions)
464 	for (char const * const * what = extensions; *what; ++what) {
465 		string const trial = addExtension(name, *what);
466 		if (makeAbsPath(trial, path).exists())
// Emit the opening line of a LyX inset with the given inset name.
473 void begin_inset(ostream & os, string const & name)
475 	os << "\n\\begin_inset " << name;
// Emit the opening of a LyX CommandInset of type \p name for the LaTeX
// command \p latexname.
479 void begin_command_inset(ostream & os, string const & name,
480 			 string const & latexname)
482 	begin_inset(os, "CommandInset ");
483 	os << name << "\nLatexCommand " << latexname << '\n';
// Emit the closing line of a LyX inset (with trailing blank line).
487 void end_inset(ostream & os)
489 	os << "\n\\end_inset\n\n";
// Skip an empty brace pair "{}" in the token stream, if present.
// Returns false when the next token is not an opening brace.
493 bool skip_braces(Parser & p)
495 	if (p.next_token().cat() != catBegin)
498 	if (p.next_token().cat() == catEnd) {
507 /// replace LaTeX commands in \p s from the unicodesymbols file with their
// Unicode replacement, registering any LaTeX packages the replacement needs.
509 docstring convert_unicodesymbols(docstring s)
512 	for (size_t i = 0; i < s.size();) {
521 		docstring parsed = encodings.fromLaTeXCommand(s,
522 				Encodings::TEXT_CMD, termination, rem, &req);
		// Register every package required by the converted symbol(s).
523 		set<string>::const_iterator it = req.begin();
524 		set<string>::const_iterator en = req.end();
525 		for (; it != en; ++it)
526 			preamble.registerAutomaticallyLoadedPackage(*it);
529 		if (s.empty() || s[0] != '\\')
538 /// try to convert \p s to a valid InsetCommand argument
539 string convert_command_inset_arg(string s)
542 	// since we don't know the input encoding we can't use from_utf8
543 	s = to_utf8(convert_unicodesymbols(from_ascii(s)));
544 	// LyX cannot handle newlines in a latex command
545 	return subst(s, "\n", " ");
// Write \p s as ERT content character by character, escaping backslashes
// and turning newlines into new paragraphs.
549 void output_ert(ostream & os, string const & s, Context & context)
551 	context.check_layout(os);
552 	for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
554 			os << "\n\\backslash\n";
555 		else if (*it == '\n') {
556 			context.new_paragraph(os);
557 			context.check_layout(os);
561 	context.check_end_layout(os);
// Wrap \p s in a collapsed ERT inset and write it to \p os, using a fresh
// context based on the ERT inset layout.
565 void output_ert_inset(ostream & os, string const & s, Context & context)
567 	// We must have a valid layout before outputting the ERT inset.
568 	context.check_layout(os);
569 	Context newcontext(true, context.textclass);
570 	InsetLayout const & layout = context.textclass.insetLayout(from_ascii("ERT"));
571 	if (layout.forcePlainLayout())
572 		newcontext.layout = &context.textclass.plainLayout();
573 	begin_inset(os, "ERT");
574 	os << "\nstatus collapsed\n";
575 	output_ert(os, s, newcontext);
// Find a paragraph layout by \p name; if not found directly, try again
// after enabling the module that provides it (checkModule).
580 Layout const * findLayout(TextClass const & textclass, string const & name, bool command)
582 	Layout const * layout = findLayoutWithoutModule(textclass, name, command);
585 	if (checkModule(name, command))
586 		return findLayoutWithoutModule(textclass, name, command);
// Same lookup strategy as findLayout(), but for inset layouts.
591 InsetLayout const * findInsetLayout(TextClass const & textclass, string const & name, bool command)
593 	InsetLayout const * insetlayout = findInsetLayoutWithoutModule(textclass, name, command);
596 	if (checkModule(name, command))
597 		return findInsetLayoutWithoutModule(textclass, name, command);
// Forward declaration; the definition appears later in this file.
602 void eat_whitespace(Parser &, ostream &, Context &, bool);
606  * Skips whitespace and braces.
607  * This should be called after a command has been parsed that is not put into
608  * ERT, and where LyX adds "{}" if needed.
610 void skip_spaces_braces(Parser & p, bool keepws = false)
612 	/* The following four examples produce the same typeset output and
613 	   should be handled by this function:
621 	// Unfortunately we need to skip comments, too.
622 	// We can't use eat_whitespace since writing them after the {}
623 	// results in different output in some cases.
624 	bool const skipped_spaces = p.skip_spaces(true);
625 	bool const skipped_braces = skip_braces(p);
626 	if (keepws && skipped_spaces && !skipped_braces)
627 		// put back the space (it is better handled by check_space)
628 		p.unskip_spaces(true);
// Read the LaTeX arguments described by \p latexargs from the token stream
// and emit each one as a LyX "Argument" inset. \p post selects
// post-command arguments; mandatory arguments are brace-delimited,
// optional ones bracket-delimited.
632 void output_arguments(ostream & os, Parser & p, bool outer, bool need_layout, bool post,
633                       Context & context, Layout::LaTeXArgMap const & latexargs)
636 		context.check_layout(os);
641 	Layout::LaTeXArgMap::const_iterator lait = latexargs.begin();
642 	Layout::LaTeXArgMap::const_iterator const laend = latexargs.end();
643 	for (; lait != laend; ++lait) {
645 		eat_whitespace(p, os, context, false);
646 		if (lait->second.mandatory) {
			// Mandatory argument: expect '{...}'.
647 			if (p.next_token().cat() != catBegin)
649 			p.get_token(); // eat '{'
651 				context.check_layout(os);
654 			begin_inset(os, "Argument ");
657 			os << i << "\nstatus collapsed\n\n";
658 			parse_text_in_inset(p, os, FLAG_BRACE_LAST, outer, context);
			// Optional argument: expect '[...]'.
661 			if (p.next_token().cat() == catEscape ||
662 			    p.next_token().character() != '[')
664 			p.get_token(); // eat '['
666 				context.check_layout(os);
669 			begin_inset(os, "Argument ");
672 			os << i << "\nstatus collapsed\n\n";
673 			parse_text_in_inset(p, os, FLAG_BRACK_LAST, outer, context);
676 		eat_whitespace(p, os, context, false);
// Output a command-style layout (e.g. a section heading): close the
// current layout, parse the command's arguments and body in a new layout
// context, then restore the parent context (font size, paragraph state).
681 void output_command_layout(ostream & os, Parser & p, bool outer,
682 			   Context & parent_context,
683 			   Layout const * newlayout)
685 	TeXFont const oldFont = parent_context.font;
686 	// save the current font size
687 	string const size = oldFont.size;
688 	// reset the font size to default, because the font size switches
689 	// don't affect section headings and the like
690 	parent_context.font.size = Context::normalfont.size;
691 	// we only need to write the font change if we have an open layout
692 	if (!parent_context.atParagraphStart())
693 		output_font_change(os, oldFont, parent_context.font);
694 	parent_context.check_end_layout(os);
695 	Context context(true, parent_context.textclass, newlayout,
696 			parent_context.layout, parent_context.font);
697 	if (parent_context.deeper_paragraph) {
698 		// We are beginning a nested environment after a
699 		// deeper paragraph inside the outer list environment.
700 		// Therefore we don't need to output a "begin deeper".
701 		context.need_end_deeper = true;
703 	context.check_deeper(os);
704 	output_arguments(os, p, outer, true, false, context,
705 	                 context.layout->latexargs());
706 	parse_text(p, os, FLAG_ITEM, outer, context);
707 	output_arguments(os, p, outer, false, true, context,
708 	                 context.layout->postcommandargs());
709 	context.check_end_layout(os);
710 	if (parent_context.deeper_paragraph) {
711 		// We must suppress the "end deeper" because we
712 		// suppressed the "begin deeper" above.
713 		context.need_end_deeper = false;
715 	context.check_end_deeper(os);
716 	// We don't need really a new paragraph, but
717 	// we must make sure that the next item gets a \begin_layout.
718 	parent_context.new_paragraph(os);
719 	// Set the font size to the original value. No need to output it here
720 	// (Context::begin_layout() will do that if needed)
721 	parent_context.font.size = size;
726  * Output a space if necessary.
727  * This function gets called for every whitespace token.
729  * We have three cases here:
730  * 1. A space must be suppressed. Example: The lyxcode case below
731  * 2. A space may be suppressed. Example: Spaces before "\par"
732  * 3. A space must not be suppressed. Example: A space between two words
734  * We currently handle only 1. and 3 and from 2. only the case of
735  * spaces before newlines as a side effect.
737  * 2. could be used to suppress as many spaces as possible. This has two effects:
738  * - Reimporting LyX generated LaTeX files changes almost no whitespace
739  * - Superflous whitespace from non LyX generated LaTeX files is removed.
740  * The drawback is that the logic inside the function becomes
741  * complicated, and that is the reason why it is not implemented.
743 void check_space(Parser & p, ostream & os, Context & context)
745 	Token const next = p.next_token();
746 	Token const curr = p.curr_token();
747 	// A space before a single newline and vice versa must be ignored
748 	// LyX emits a newline before \end{lyxcode}.
749 	// This newline must be ignored,
750 	// otherwise LyX will add an additional protected space.
751 	if (next.cat() == catSpace ||
752 	    next.cat() == catNewline ||
753 	    (next.cs() == "end" && context.layout->free_spacing && curr.cat() == catNewline)) {
756 	context.check_layout(os);
762  * Parse all arguments of \p command
// according to \p template_arguments, accumulating everything that cannot
// be represented natively into an ERT string that is flushed at the end.
764 void parse_arguments(string const & command,
765 		     vector<ArgumentType> const & template_arguments,
766 		     Parser & p, ostream & os, bool outer, Context & context)
768 	string ert = command;
769 	size_t no_arguments = template_arguments.size();
770 	for (size_t i = 0; i < no_arguments; ++i) {
771 		switch (template_arguments[i]) {
774 			// This argument contains regular LaTeX
775 			output_ert_inset(os, ert + '{', context);
776 			eat_whitespace(p, os, context, false);
777 			if (template_arguments[i] == required)
778 				parse_text(p, os, FLAG_ITEM, outer, context);
780 				parse_text_snippet(p, os, FLAG_ITEM, outer, context);
784 			// This argument consists only of a single item.
785 			// The presence of '{' or not must be preserved.
787 			if (p.next_token().cat() == catBegin)
788 				ert += '{' + p.verbatim_item() + '}';
790 				ert += p.verbatim_item();
794 			// This argument may contain special characters
795 			ert += '{' + p.verbatim_item() + '}';
799 			// true because we must not eat whitespace
800 			// if an optional arg follows we must not strip the
801 			// brackets from this one
802 			if (i < no_arguments - 1 &&
803 			    template_arguments[i+1] == optional)
804 				ert += p.getFullOpt(true);
806 				ert += p.getOpt(true);
	// flush any remaining ERT
810 	output_ert_inset(os, ert, context);
815  * Check whether \p command is a known command. If yes,
816  * handle the command with all arguments.
817  * \return true if the command was parsed, false otherwise.
819 bool parse_command(string const & command, Parser & p, ostream & os,
820 		   bool outer, Context & context)
822 	if (known_commands.find(command) != known_commands.end()) {
823 		parse_arguments(command, known_commands[command], p, os,
831 /// Parses a minipage or parbox
// Handles nested outer/inner box combinations (minipage, parbox, makebox,
// framebox, fbox, mbox, shaded, shadowbox, doublebox, ...). Falls back to
// ERT whenever LyX's Box inset cannot represent the construct (e.g. length
// variables, or extra content between inner and outer box).
832 void parse_box(Parser & p, ostream & os, unsigned outer_flags,
833 	       unsigned inner_flags, bool outer, Context & parent_context,
834 	       string const & outer_type, string const & special,
835 	       string const & inner_type)
839 	string hor_pos = "c";
840 	// We need to set the height to the LaTeX default of 1\\totalheight
841 	// for the case when no height argument is given
842 	string height_value = "1";
843 	string height_unit = "in";
844 	string height_special = "totalheight";
849 	string width_special = "none";
	// --- read the optional arguments of the inner box ---
850 	if (!inner_type.empty() && p.hasOpt()) {
851 		if (inner_type != "makebox")
852 			position = p.getArg('[', ']');
854 			latex_width = p.getArg('[', ']');
855 			translate_box_len(latex_width, width_value, width_unit, width_special);
858 		if (position != "t" && position != "c" && position != "b") {
859 			cerr << "invalid position " << position << " for "
860 			     << inner_type << endl;
864 			if (inner_type != "makebox") {
865 				latex_height = p.getArg('[', ']');
866 				translate_box_len(latex_height, height_value, height_unit, height_special);
868 				string const opt = p.getArg('[', ']');
871 				if (hor_pos != "l" && hor_pos != "c" &&
872 				    hor_pos != "r" && hor_pos != "s") {
873 					cerr << "invalid hor_pos " << hor_pos
874 					     << " for " << inner_type << endl;
881 				inner_pos = p.getArg('[', ']');
882 				if (inner_pos != "c" && inner_pos != "t" &&
883 				    inner_pos != "b" && inner_pos != "s") {
884 					cerr << "invalid inner_pos "
885 					     << inner_pos << " for "
886 					     << inner_type << endl;
					// fall back to the vertical position
887 					inner_pos = position;
	// --- determine the width ---
892 	if (inner_type.empty()) {
893 		if (special.empty() && outer_type != "framebox")
894 			latex_width = "1\\columnwidth";
897 			latex_width = p2.getArg('[', ']');
898 			string const opt = p2.getArg('[', ']');
901 				if (hor_pos != "l" && hor_pos != "c" &&
902 				    hor_pos != "r" && hor_pos != "s") {
903 					cerr << "invalid hor_pos " << hor_pos
904 					     << " for " << outer_type << endl;
909 	} else if (inner_type != "makebox")
910 		latex_width = p.verbatim_item();
911 	// if e.g. only \ovalbox{content} was used, set the width to 1\columnwidth
912 	// as this is LyX's standard for such cases (except for makebox)
913 	// \framebox is more special and handled below
914 	if (latex_width.empty() && inner_type != "makebox"
915 		&& outer_type != "framebox")
916 		latex_width = "1\\columnwidth";
918 		translate_len(latex_width, width_value, width_unit);
920 	bool shadedparbox = false;
921 	if (inner_type == "shaded") {
922 		eat_whitespace(p, os, parent_context, false);
923 		if (outer_type == "parbox") {
925 			if (p.next_token().cat() == catBegin)
927 			eat_whitespace(p, os, parent_context, false);
933 	// If we already read the inner box we have to push the inner env
934 	if (!outer_type.empty() && !inner_type.empty() &&
935 	    (inner_flags & FLAG_END))
936 		active_environments.push_back(inner_type);
937 	// LyX can't handle length variables
938 	bool use_ert = contains(width_unit, '\\') || contains(height_unit, '\\');
939 	if (!use_ert && !outer_type.empty() && !inner_type.empty()) {
940 		// Look whether there is some content after the end of the
941 		// inner box, but before the end of the outer box.
942 		// If yes, we need to output ERT.
944 		if (inner_flags & FLAG_END)
945 			p.ertEnvironment(inner_type);
949 		bool const outer_env(outer_type == "framed" || outer_type == "minipage");
950 		if ((outer_env && p.next_token().asInput() != "\\end") ||
951 		    (!outer_env && p.next_token().cat() != catEnd)) {
952 			// something is between the end of the inner box and
953 			// the end of the outer box, so we need to use ERT.
958 	// if only \makebox{content} was used we can set its width to 1\width
959 	// because this identic and also identic to \mbox
960 	// this doesn't work for \framebox{content}, thus we have to use ERT for this
961 	if (latex_width.empty() && inner_type == "makebox") {
964 		width_special = "width";
965 	} else if (latex_width.empty() && outer_type == "framebox") {
968 		width_special = "none";
	// --- ERT fallback: reconstruct the original LaTeX verbatim ---
972 		if (!outer_type.empty()) {
973 			if (outer_flags & FLAG_END)
974 				ss << "\\begin{" << outer_type << '}';
976 				ss << '\\' << outer_type << '{';
977 				if (!special.empty())
981 		if (!inner_type.empty()) {
982 			if (inner_type != "shaded") {
983 				if (inner_flags & FLAG_END)
984 					ss << "\\begin{" << inner_type << '}';
986 					ss << '\\' << inner_type;
988 			if (!position.empty())
989 				ss << '[' << position << ']';
990 			if (!latex_height.empty())
991 				ss << '[' << latex_height << ']';
992 			if (!inner_pos.empty())
993 				ss << '[' << inner_pos << ']';
994 			ss << '{' << latex_width << '}';
995 			if (!(inner_flags & FLAG_END))
998 		if (inner_type == "shaded")
999 			ss << "\\begin{shaded}";
1000 		output_ert_inset(os, ss.str(), parent_context);
1001 		if (!inner_type.empty()) {
1002 			parse_text(p, os, inner_flags, outer, parent_context);
1003 			if (inner_flags & FLAG_END)
1004 				output_ert_inset(os, "\\end{" + inner_type + '}',
1007 				output_ert_inset(os, "}", parent_context);
1009 		if (!outer_type.empty()) {
1010 			// If we already read the inner box we have to pop
1012 			if (!inner_type.empty() && (inner_flags & FLAG_END))
1013 				active_environments.pop_back();
1015 			// Ensure that the end of the outer box is parsed correctly:
1016 			// The opening brace has been eaten by parse_outer_box()
1017 			if (!outer_type.empty() && (outer_flags & FLAG_ITEM)) {
1018 				outer_flags &= ~FLAG_ITEM;
1019 				outer_flags |= FLAG_BRACE_LAST;
1021 			parse_text(p, os, outer_flags, outer, parent_context);
1022 			if (outer_flags & FLAG_END)
1023 				output_ert_inset(os, "\\end{" + outer_type + '}',
1026 				output_ert_inset(os, "}", parent_context);
	// --- native Box inset output ---
1029 		// LyX does not like empty positions, so we have
1030 		// to set them to the LaTeX default values here.
1031 		if (position.empty())
1033 		if (inner_pos.empty())
1034 			inner_pos = position;
1035 		parent_context.check_layout(os);
1036 		begin_inset(os, "Box ");
1037 		if (outer_type == "framed")
1039 		else if (outer_type == "framebox" || outer_type == "fbox")
1041 		else if (outer_type == "shadowbox")
1042 			os << "Shadowbox\n";
1043 		else if ((outer_type == "shaded" && inner_type.empty()) ||
1044 			     (outer_type == "minipage" && inner_type == "shaded") ||
1045 			     (outer_type == "parbox" && inner_type == "shaded")) {
1047 			preamble.registerAutomaticallyLoadedPackage("color");
1048 		} else if (outer_type == "doublebox")
1049 			os << "Doublebox\n";
1050 		else if (outer_type.empty() || outer_type == "mbox")
1051 			os << "Frameless\n";
1053 			os << outer_type << '\n';
1054 		os << "position \"" << position << "\"\n";
1055 		os << "hor_pos \"" << hor_pos << "\"\n";
1056 		if (outer_type == "mbox")
1057 			os << "has_inner_box 1\n";
1059 			os << "has_inner_box " << !inner_type.empty() << "\n";
1060 		os << "inner_pos \"" << inner_pos << "\"\n";
1061 		os << "use_parbox " << (inner_type == "parbox" || shadedparbox)
1063 		if (outer_type == "mbox")
1064 			os << "use_makebox 1\n";
1066 			os << "use_makebox " << (inner_type == "makebox") << '\n';
1067 		if (outer_type == "fbox" || outer_type == "mbox")
1068 			os << "width \"\"\n";
1070 			os << "width \"" << width_value << width_unit << "\"\n";
1071 		os << "special \"" << width_special << "\"\n";
1072 		os << "height \"" << height_value << height_unit << "\"\n";
1073 		os << "height_special \"" << height_special << "\"\n";
1074 		os << "status open\n\n";
1076 		// Unfortunately we can't use parse_text_in_inset:
1077 		// InsetBox::forcePlainLayout() is hard coded and does not
1078 		// use the inset layout. Apart from that do we call parse_text
1079 		// up to two times, but need only one check_end_layout.
		// NOTE(review): the "|| inner_type == \"makebox\"" disjunct below
		// is redundant — a non-empty check already covers it; confirm
		// whether "&&" was intended.
1080 		bool const forcePlainLayout =
1081 			(!inner_type.empty() || inner_type == "makebox") &&
1082 			outer_type != "shaded" && outer_type != "framed";
1083 		Context context(true, parent_context.textclass);
1084 		if (forcePlainLayout)
1085 			context.layout = &context.textclass.plainLayout();
1087 		context.font = parent_context.font;
1089 		// If we have no inner box the contents will be read with the outer box
1090 		if (!inner_type.empty())
1091 			parse_text(p, os, inner_flags, outer, context);
1093 		// Ensure that the end of the outer box is parsed correctly:
1094 		// The opening brace has been eaten by parse_outer_box()
1095 		if (!outer_type.empty() && (outer_flags & FLAG_ITEM)) {
1096 			outer_flags &= ~FLAG_ITEM;
1097 			outer_flags |= FLAG_BRACE_LAST;
1100 		// Find end of outer box, output contents if inner_type is
1101 		// empty and output possible comments
1102 		if (!outer_type.empty()) {
1103 			// If we already read the inner box we have to pop
1105 			if (!inner_type.empty() && (inner_flags & FLAG_END))
1106 				active_environments.pop_back();
1107 			// This does not output anything but comments if
1108 			// inner_type is not empty (see use_ert)
1109 			parse_text(p, os, outer_flags, outer, context);
1112 		context.check_end_layout(os);
1114 #ifdef PRESERVE_LAYOUT
1115 	// LyX puts a % after the end of the minipage
1116 	if (p.next_token().cat() == catNewline && p.next_token().cs().size() > 1) {
1118 		//output_ert_inset(os, "%dummy", parent_context);
1121 		parent_context.new_paragraph(os);
1123 	else if (p.next_token().cat() == catSpace || p.next_token().cat() == catNewline) {
1124 		//output_ert_inset(os, "%dummy", parent_context);
1127 	// We add a protected space if something real follows
1128 	if (p.good() && p.next_token().cat() != catComment) {
1129 		begin_inset(os, "space ~\n");
/// Parse the outer part of a box construct (minipage, parbox, shaded, fbox,
/// mbox, ...): swallow its arguments, detect whether an inner \parbox or a
/// minipage/shaded environment follows, and dispatch to parse_box() with the
/// matching inner flags.
1138 void parse_outer_box(Parser & p, ostream & os, unsigned flags, bool outer,
1139 Context & parent_context, string const & outer_type,
1140 string const & special)
1142 eat_whitespace(p, os, parent_context, false);
1143 if (flags & FLAG_ITEM) {
// A command-style box must be followed by its brace group; warn if missing.
1145 if (p.next_token().cat() == catBegin)
1148 cerr << "Warning: Ignoring missing '{' after \\"
1149 << outer_type << '.' << endl;
1150 eat_whitespace(p, os, parent_context, false);
1153 unsigned int inner_flags = 0;
// minipage and parbox take optional arguments before the mandatory ones.
1155 if (outer_type == "minipage" || outer_type == "parbox") {
1156 p.skip_spaces(true);
1157 while (p.hasOpt()) {
1159 p.skip_spaces(true);
1162 p.skip_spaces(true);
1163 if (outer_type == "parbox") {
1165 if (p.next_token().cat() == catBegin)
1167 p.skip_spaces(true);
// Decide whether an inner box follows the outer one.
1170 if (outer_type == "shaded" || outer_type == "fbox"
1171 || outer_type == "mbox") {
1172 // These boxes never have an inner box
1174 } else if (p.next_token().asInput() == "\\parbox") {
// Inner \parbox: command form, contents delimited by braces.
1175 inner = p.get_token().cs();
1176 inner_flags = FLAG_ITEM;
1177 } else if (p.next_token().asInput() == "\\begin") {
1178 // Is this a minipage or shaded box?
1181 inner = p.getArg('{', '}');
// Inner environment form: contents delimited by \end{...}.
1183 if (inner == "minipage" || inner == "shaded")
1184 inner_flags = FLAG_END;
1189 if (inner_flags == FLAG_END) {
1190 if (inner != "shaded")
1194 eat_whitespace(p, os, parent_context, false);
1196 parse_box(p, os, flags, FLAG_END, outer, parent_context,
1197 outer_type, special, inner);
1199 if (inner_flags == FLAG_ITEM) {
1201 eat_whitespace(p, os, parent_context, false);
1203 parse_box(p, os, flags, inner_flags, outer, parent_context,
1204 outer_type, special, inner);
/// Parse a listings program listing and convert it to a LyX listings inset.
/// \param in_line true for inline \lstinline snippets (read up to a matching
///        delimiter character), false for a lstlisting environment (read up
///        to \end{lstlisting}). The contents are read verbatim and emitted
///        as ERT inside the inset.
1209 void parse_listings(Parser & p, ostream & os, Context & parent_context, bool in_line)
1211 parent_context.check_layout(os);
1212 begin_inset(os, "listings\n");
1214 string arg = p.verbatimOption();
1215 os << "lstparams " << '"' << arg << '"' << '\n';
// Colored keywords etc. in lstparams pull in the color package.
1216 if (arg.find("\\color") != string::npos)
1217 preamble.registerAutomaticallyLoadedPackage("color");
1220 os << "inline true\n";
1222 os << "inline false\n";
1223 os << "status collapsed\n";
// Listing contents are plain text: use the plain layout inside the inset.
1224 Context context(true, parent_context.textclass);
1225 context.layout = &parent_context.textclass.plainLayout();
1228 // set catcodes to verbatim early, just in case.
1229 p.setCatcodes(VERBATIM_CATCODES);
// \lstinline uses the next character as its own closing delimiter.
1230 string delim = p.get_token().asInput();
1231 //FIXME: handle error condition
1232 s = p.verbatimStuff(delim).second;
1233 // context.new_paragraph(os);
1235 s = p.verbatimEnvironment("lstlisting");
1236 output_ert(os, s, context);
1241 /// parse an unknown environment
/// The \begin{...} and \end{...} markers are emitted as ERT and the contents
/// are parsed as an ordinary text snippet in between.
1242 void parse_unknown_environment(Parser & p, string const & name, ostream & os,
1243 unsigned flags, bool outer,
1244 Context & parent_context)
1246 if (name == "tabbing")
1247 // We need to remember that we have to handle '\=' specially
1248 flags |= FLAG_TABBING;
1250 // We need to translate font changes and paragraphs inside the
1251 // environment to ERT if we have a non standard font.
1252 // Otherwise things like
1253 // \large\begin{foo}\huge bar\end{foo}
1255 bool const specialfont =
1256 (parent_context.font != parent_context.normalfont);
// Save new_layout_allowed; it is temporarily cleared and restored below.
1257 bool const new_layout_allowed = parent_context.new_layout_allowed;
1259 parent_context.new_layout_allowed = false;
1260 output_ert_inset(os, "\\begin{" + name + "}", parent_context);
1261 parse_text_snippet(p, os, flags, outer, parent_context);
1262 output_ert_inset(os, "\\end{" + name + "}", parent_context);
1264 parent_context.new_layout_allowed = new_layout_allowed;
/// Central dispatcher for \begin{name}...\end{name}: converts each known
/// LaTeX environment to the corresponding LyX construct (formula, float,
/// box, note, layout, flex inset, ...) and falls back to ERT for unknown
/// environments. \p last_env is the name of the previous environment, used
/// to emit a separator between two consecutive identical layouts.
1268 void parse_environment(Parser & p, ostream & os, bool outer,
1269 string & last_env, Context & parent_context)
1271 Layout const * newlayout;
1272 InsetLayout const * newinsetlayout = 0;
1273 string const name = p.getArg('{', '}');
1274 const bool is_starred = suffixIs(name, '*');
1275 string const unstarred_name = rtrim(name, "*");
1276 active_environments.push_back(name);
// Math environments become a Formula inset with the raw LaTeX inside.
1278 if (is_math_env(name)) {
1279 parent_context.check_layout(os);
1280 begin_inset(os, "Formula ");
1281 os << "\\begin{" << name << "}";
1282 parse_math(p, os, FLAG_END, MATH_MODE);
1283 os << "\\end{" << name << "}";
1285 if (is_display_math_env(name)) {
1286 // Prevent the conversion of a line break to a space
1287 // (bug 7668). This does not change the output, but
1288 // looks ugly in LyX.
1289 eat_whitespace(p, os, parent_context, false);
// polyglossia language environments: switch the language for the contents.
1293 else if (is_known(name, preamble.polyglossia_languages)) {
1294 // We must begin a new paragraph if not already done
1295 if (! parent_context.atParagraphStart()) {
1296 parent_context.check_end_layout(os);
1297 parent_context.new_paragraph(os);
1299 // save the language in the context so that it is
1300 // handled by parse_text
1301 parent_context.font.language = preamble.polyglossia2lyx(name);
1302 parse_text(p, os, FLAG_END, outer, parent_context);
1303 // Just in case the environment is empty
1304 parent_context.extra_stuff.erase();
1305 // We must begin a new paragraph to reset the language
1306 parent_context.new_paragraph(os);
// tabular/longtable become a Tabular inset.
1310 else if (unstarred_name == "tabular" || name == "longtable") {
1311 eat_whitespace(p, os, parent_context, false);
1312 string width = "0pt";
// tabular* carries a mandatory total-width argument.
1313 if (name == "tabular*") {
1314 width = lyx::translate_len(p.getArg('{', '}'));
1315 eat_whitespace(p, os, parent_context, false);
1317 parent_context.check_layout(os);
1318 begin_inset(os, "Tabular ");
1319 handle_tabular(p, os, name, width, parent_context);
// Floats declared by the text class (figure, table, algorithm, ...).
1324 else if (parent_context.textclass.floats().typeExist(unstarred_name)) {
1325 eat_whitespace(p, os, parent_context, false);
1326 string const opt = p.hasOpt() ? p.getArg('[', ']') : string();
1327 eat_whitespace(p, os, parent_context, false);
1328 parent_context.check_layout(os);
1329 begin_inset(os, "Float " + unstarred_name + "\n");
1330 // store the float type for subfloats
1331 // subfloats only work with figures and tables
1332 if (unstarred_name == "figure")
1333 float_type = unstarred_name;
1334 else if (unstarred_name == "table")
1335 float_type = unstarred_name;
1339 os << "placement " << opt << '\n';
// The "H" placement specifier is provided by the float package.
1340 if (contains(opt, "H"))
1341 preamble.registerAutomaticallyLoadedPackage("float");
1343 Floating const & fl = parent_context.textclass.floats()
1344 .getType(unstarred_name);
1345 if (!fl.floattype().empty() && fl.usesFloatPkg())
1346 preamble.registerAutomaticallyLoadedPackage("float");
1349 os << "wide " << convert<string>(is_starred)
1350 << "\nsideways false"
1351 << "\nstatus open\n\n";
1352 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
1354 // We don't need really a new paragraph, but
1355 // we must make sure that the next item gets a \begin_layout.
1356 parent_context.new_paragraph(os);
1358 // the float is parsed thus delete the type
// Rotated floats from the rotfloat package.
1362 else if (unstarred_name == "sidewaysfigure"
1363 || unstarred_name == "sidewaystable") {
1364 eat_whitespace(p, os, parent_context, false);
1365 parent_context.check_layout(os);
1366 if (unstarred_name == "sidewaysfigure")
1367 begin_inset(os, "Float figure\n");
1369 begin_inset(os, "Float table\n");
1370 os << "wide " << convert<string>(is_starred)
1371 << "\nsideways true"
1372 << "\nstatus open\n\n";
1373 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
1375 // We don't need really a new paragraph, but
1376 // we must make sure that the next item gets a \begin_layout.
1377 parent_context.new_paragraph(os);
1379 preamble.registerAutomaticallyLoadedPackage("rotfloat");
// Text-wrapped floats from the wrapfig package.
1382 else if (name == "wrapfigure" || name == "wraptable") {
1383 // syntax is \begin{wrapfigure}[lines]{placement}[overhang]{width}
1384 eat_whitespace(p, os, parent_context, false);
1385 parent_context.check_layout(os);
1388 string overhang = "0col%";
1391 lines = p.getArg('[', ']');
1392 string const placement = p.getArg('{', '}');
1394 overhang = p.getArg('[', ']');
1395 string const width = p.getArg('{', '}');
1397 if (name == "wrapfigure")
1398 begin_inset(os, "Wrap figure\n");
1400 begin_inset(os, "Wrap table\n");
1401 os << "lines " << lines
1402 << "\nplacement " << placement
1403 << "\noverhang " << lyx::translate_len(overhang)
1404 << "\nwidth " << lyx::translate_len(width)
1405 << "\nstatus open\n\n";
1406 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
1408 // We don't need really a new paragraph, but
1409 // we must make sure that the next item gets a \begin_layout.
1410 parent_context.new_paragraph(os);
1412 preamble.registerAutomaticallyLoadedPackage("wrapfig");
1415 else if (name == "minipage") {
1416 eat_whitespace(p, os, parent_context, false);
1417 // Test whether this is an outer box of a shaded box
1419 // swallow arguments
1420 while (p.hasOpt()) {
1422 p.skip_spaces(true);
1425 p.skip_spaces(true);
1426 Token t = p.get_token();
1427 bool shaded = false;
// Look ahead: a \begin{shaded} right after the arguments means the
// minipage is only the outer box of a shaded box.
1428 if (t.asInput() == "\\begin") {
1429 p.skip_spaces(true);
1430 if (p.getArg('{', '}') == "shaded")
1435 parse_outer_box(p, os, FLAG_END, outer,
1436 parent_context, name, "shaded");
1438 parse_box(p, os, 0, FLAG_END, outer, parent_context,
// comment environment (verbatim package) -> Note Comment inset.
1443 else if (name == "comment") {
1444 eat_whitespace(p, os, parent_context, false);
1445 parent_context.check_layout(os);
1446 begin_inset(os, "Note Comment\n");
1447 os << "status open\n";
1448 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
1451 skip_braces(p); // eat {} that might by set by LyX behind comments
1452 preamble.registerAutomaticallyLoadedPackage("verbatim");
1455 else if (name == "verbatim") {
1456 // FIXME: this should go in the generic code that
1457 // handles environments defined in layout file that
1458 // have "PassThru 1". However, the code over there is
1459 // already too complicated for my taste.
1460 parent_context.new_paragraph(os);
1461 Context context(true, parent_context.textclass,
1462 &parent_context.textclass[from_ascii("Verbatim")]);
1463 string s = p.verbatimEnvironment("verbatim");
1464 output_ert(os, s, context);
// IPA environment (tipa/tipx packages) -> IPA inset.
1468 else if (name == "IPA") {
1469 eat_whitespace(p, os, parent_context, false);
1470 parent_context.check_layout(os);
1471 begin_inset(os, "IPA\n");
1472 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
1475 preamble.registerAutomaticallyLoadedPackage("tipa");
1476 preamble.registerAutomaticallyLoadedPackage("tipx");
1479 else if (name == "CJK") {
1480 // the scheme is \begin{CJK}{encoding}{mapping}text\end{CJK}
1481 // It is impossible to decide if a CJK environment was in its own paragraph or within
1482 // a line. We therefore always assume a paragraph since the latter is a rare case.
1483 eat_whitespace(p, os, parent_context, false);
1484 parent_context.check_end_layout(os);
1485 // store the encoding to be able to reset it
1486 string const encoding_old = p.getEncoding();
1487 string const encoding = p.getArg('{', '}');
1488 // FIXME: For some reason JIS does not work. Although the text
1489 // in tests/CJK.tex is identical with the SJIS version if you
1490 // convert both snippets using the recode command line utility,
1491 // the resulting .lyx file contains some extra characters if
1492 // you set buggy_encoding to false for JIS.
1493 bool const buggy_encoding = encoding == "JIS";
1494 if (!buggy_encoding)
1495 p.setEncoding(encoding, Encoding::CJK);
1497 // FIXME: This will read garbage, since the data is not encoded in utf8.
1498 p.setEncoding("UTF-8");
1500 // LyX only supports the same mapping for all CJK
1501 // environments, so we might need to output everything as ERT
1502 string const mapping = trim(p.getArg('{', '}'));
1503 char const * const * const where =
1504 is_known(encoding, supported_CJK_encodings);
1505 if (!buggy_encoding && !preamble.fontCJKSet())
1506 preamble.fontCJK(mapping);
1507 bool knownMapping = mapping == preamble.fontCJK();
// Unsupported encoding/mapping: fall back to ERT for the whole environment.
1508 if (buggy_encoding || !knownMapping || !where) {
1509 parent_context.check_layout(os);
1510 output_ert_inset(os, "\\begin{" + name + "}{" + encoding + "}{" + mapping + "}",
1512 // we must parse the content as verbatim because e.g. JIS can contain
1513 // normally invalid characters
1514 // FIXME: This works only for the most simple cases.
1515 // Since TeX control characters are not parsed,
1516 // things like comments are completely wrong.
1517 string const s = p.plainEnvironment("CJK");
1518 for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
1520 output_ert_inset(os, "\\", parent_context);
1521 else if (*it == '$')
1522 output_ert_inset(os, "$", parent_context);
1523 else if (*it == '\n' && it + 1 != et && s.begin() + 1 != it)
1528 output_ert_inset(os, "\\end{" + name + "}",
1532 supported_CJK_languages[where - supported_CJK_encodings];
1533 // store the language because we must reset it at the end
1534 string const lang_old = parent_context.font.language;
1535 parent_context.font.language = lang;
1536 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
1537 parent_context.font.language = lang_old;
1538 parent_context.new_paragraph(os);
1540 p.setEncoding(encoding_old);
// lyxgreyedout -> Note Greyedout inset.
1544 else if (name == "lyxgreyedout") {
1545 eat_whitespace(p, os, parent_context, false);
1546 parent_context.check_layout(os);
1547 begin_inset(os, "Note Greyedout\n");
1548 os << "status open\n";
1549 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
1552 if (!preamble.notefontcolor().empty())
1553 preamble.registerAutomaticallyLoadedPackage("color");
// btSect -> bibtex command inset (with btprint flavour if given).
1556 else if (name == "btSect") {
1557 eat_whitespace(p, os, parent_context, false);
1558 parent_context.check_layout(os);
1559 begin_command_inset(os, "bibtex", "bibtex");
1560 string bibstyle = "plain";
1562 bibstyle = p.getArg('[', ']');
1563 p.skip_spaces(true);
1565 string const bibfile = p.getArg('{', '}');
1566 eat_whitespace(p, os, parent_context, false);
1567 Token t = p.get_token();
1568 if (t.asInput() == "\\btPrintCited") {
1569 p.skip_spaces(true);
1570 os << "btprint " << '"' << "btPrintCited" << '"' << "\n";
1572 if (t.asInput() == "\\btPrintNotCited") {
1573 p.skip_spaces(true);
1574 os << "btprint " << '"' << "btPrintNotCited" << '"' << "\n";
1576 if (t.asInput() == "\\btPrintAll") {
1577 p.skip_spaces(true);
1578 os << "btprint " << '"' << "btPrintAll" << '"' << "\n";
1580 os << "bibfiles " << '"' << bibfile << '"' << "\n";
1581 os << "options " << '"' << bibstyle << '"' << "\n";
1582 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
// framed/shaded boxes are always outer boxes.
1587 else if (name == "framed" || name == "shaded") {
1588 eat_whitespace(p, os, parent_context, false);
1589 parse_outer_box(p, os, FLAG_END, outer, parent_context, name, "");
1593 else if (name == "lstlisting") {
1594 eat_whitespace(p, os, parent_context, false);
1595 parse_listings(p, os, parent_context, false);
// No new layout allowed here: everything below needs one, so use ERT.
1599 else if (!parent_context.new_layout_allowed)
1600 parse_unknown_environment(p, name, os, FLAG_END, outer,
1603 // Alignment and spacing settings
1604 // FIXME (bug xxxx): These settings can span multiple paragraphs and
1605 // therefore are totally broken!
1606 // Note that \centering, raggedright, and raggedleft cannot be handled, as
1607 // they are commands not environments. They are furthermore switches that
1608 // can be ended by another switches, but also by commands like \footnote or
1609 // \parbox. So the only safe way is to leave them untouched.
1610 else if (name == "center" || name == "centering" ||
1611 name == "flushleft" || name == "flushright" ||
1612 name == "singlespace" || name == "onehalfspace" ||
1613 name == "doublespace" || name == "spacing") {
1614 eat_whitespace(p, os, parent_context, false);
1615 // We must begin a new paragraph if not already done
1616 if (! parent_context.atParagraphStart()) {
1617 parent_context.check_end_layout(os);
1618 parent_context.new_paragraph(os);
1620 if (name == "flushleft")
1621 parent_context.add_extra_stuff("\\align left\n");
1622 else if (name == "flushright")
1623 parent_context.add_extra_stuff("\\align right\n");
1624 else if (name == "center" || name == "centering")
1625 parent_context.add_extra_stuff("\\align center\n");
1626 else if (name == "singlespace")
1627 parent_context.add_extra_stuff("\\paragraph_spacing single\n");
1628 else if (name == "onehalfspace") {
1629 parent_context.add_extra_stuff("\\paragraph_spacing onehalf\n");
1630 preamble.registerAutomaticallyLoadedPackage("setspace");
1631 } else if (name == "doublespace") {
1632 parent_context.add_extra_stuff("\\paragraph_spacing double\n");
1633 preamble.registerAutomaticallyLoadedPackage("setspace");
1634 } else if (name == "spacing") {
1635 parent_context.add_extra_stuff("\\paragraph_spacing other " + p.verbatim_item() + "\n");
1636 preamble.registerAutomaticallyLoadedPackage("setspace");
1638 parse_text(p, os, FLAG_END, outer, parent_context);
1639 // Just in case the environment is empty
1640 parent_context.extra_stuff.erase();
1641 // We must begin a new paragraph to reset the alignment
1642 parent_context.new_paragraph(os);
1646 // The single '=' is meant here.
// Environment defined as a layout in the text class.
1647 else if ((newlayout = findLayout(parent_context.textclass, name, false))) {
1648 eat_whitespace(p, os, parent_context, false);
1649 Context context(true, parent_context.textclass, newlayout,
1650 parent_context.layout, parent_context.font);
1651 if (parent_context.deeper_paragraph) {
1652 // We are beginning a nested environment after a
1653 // deeper paragraph inside the outer list environment.
1654 // Therefore we don't need to output a "begin deeper".
1655 context.need_end_deeper = true;
1657 parent_context.check_end_layout(os);
1658 if (last_env == name) {
1659 // we need to output a separator since LyX would export
1660 // the two environments as one otherwise (bug 5716)
1661 TeX2LyXDocClass const & textclass(parent_context.textclass);
1662 Context newcontext(true, textclass,
1663 &(textclass.defaultLayout()));
1664 newcontext.check_layout(os);
1665 begin_inset(os, "Separator plain\n");
1667 newcontext.check_end_layout(os);
1669 switch (context.layout->latextype) {
1670 case LATEX_LIST_ENVIRONMENT:
1671 context.add_par_extra_stuff("\\labelwidthstring "
1672 + p.verbatim_item() + '\n');
1675 case LATEX_BIB_ENVIRONMENT:
1676 p.verbatim_item(); // swallow next arg
1682 context.check_deeper(os);
1683 // handle known optional and required arguments
1684 // Unfortunately LyX can't handle arguments of list arguments (bug 7468):
1685 // It is impossible to place anything after the environment name,
1686 // but before the first \\item.
1687 if (context.layout->latextype == LATEX_ENVIRONMENT)
1688 output_arguments(os, p, outer, false, false, context,
1689 context.layout->latexargs());
1690 parse_text(p, os, FLAG_END, outer, context);
1691 if (context.layout->latextype == LATEX_ENVIRONMENT)
1692 output_arguments(os, p, outer, false, true, context,
1693 context.layout->postcommandargs());
1694 context.check_end_layout(os);
1695 if (parent_context.deeper_paragraph) {
1696 // We must suppress the "end deeper" because we
1697 // suppressed the "begin deeper" above.
1698 context.need_end_deeper = false;
1700 context.check_end_deeper(os);
1701 parent_context.new_paragraph(os);
1703 if (!preamble.titleLayoutFound())
1704 preamble.titleLayoutFound(newlayout->intitle);
// Register the packages the layout requires.
1705 set<string> const & req = newlayout->requires();
1706 set<string>::const_iterator it = req.begin();
1707 set<string>::const_iterator en = req.end();
1708 for (; it != en; ++it)
1709 preamble.registerAutomaticallyLoadedPackage(*it);
1712 // The single '=' is meant here.
// Environment defined as an inset layout -> Flex inset.
1713 else if ((newinsetlayout = findInsetLayout(parent_context.textclass, name, false))) {
1714 eat_whitespace(p, os, parent_context, false);
1715 parent_context.check_layout(os);
1716 begin_inset(os, "Flex ");
1717 os << to_utf8(newinsetlayout->name()) << '\n'
1718 << "status collapsed\n";
// PassThru insets keep their contents verbatim as ERT.
1719 if (newinsetlayout->isPassThru()) {
1720 string const arg = p.verbatimEnvironment(name);
1721 Context context(true, parent_context.textclass,
1722 &parent_context.textclass.plainLayout(),
1723 parent_context.layout);
1724 output_ert(os, arg, parent_context);
1726 parse_text_in_inset(p, os, FLAG_END, false, parent_context, newinsetlayout);
1730 else if (name == "appendix") {
1731 // This is no good latex style, but it works and is used in some documents...
1732 eat_whitespace(p, os, parent_context, false);
1733 parent_context.check_end_layout(os);
1734 Context context(true, parent_context.textclass, parent_context.layout,
1735 parent_context.layout, parent_context.font);
1736 context.check_layout(os);
1737 os << "\\start_of_appendix\n";
1738 parse_text(p, os, FLAG_END, outer, context);
1739 context.check_end_layout(os);
// Environments known from the syntax file with declared argument types.
1743 else if (known_environments.find(name) != known_environments.end()) {
1744 vector<ArgumentType> arguments = known_environments[name];
1745 // The last "argument" denotes wether we may translate the
1746 // environment contents to LyX
1747 // The default required if no argument is given makes us
1748 // compatible with the reLyXre environment.
1749 ArgumentType contents = arguments.empty() ?
1752 if (!arguments.empty())
1753 arguments.pop_back();
1754 // See comment in parse_unknown_environment()
1755 bool const specialfont =
1756 (parent_context.font != parent_context.normalfont);
1757 bool const new_layout_allowed =
1758 parent_context.new_layout_allowed;
1760 parent_context.new_layout_allowed = false;
1761 parse_arguments("\\begin{" + name + "}", arguments, p, os,
1762 outer, parent_context);
1763 if (contents == verbatim)
1764 output_ert_inset(os, p.ertEnvironment(name),
1767 parse_text_snippet(p, os, FLAG_END, outer,
1769 output_ert_inset(os, "\\end{" + name + "}", parent_context);
1771 parent_context.new_layout_allowed = new_layout_allowed;
// Completely unknown environment: plain ERT fallback.
1775 parse_unknown_environment(p, name, os, FLAG_END, outer,
1779 active_environments.pop_back();
1783 /// parses a comment and outputs it to \p os.
/// Comments with content are preserved as ERT; a trailing newline after a
/// comment line may start a new paragraph.
1784 void parse_comment(Parser & p, ostream & os, Token const & t, Context & context)
1786 LASSERT(t.cat() == catComment, return);
// Non-empty comment: keep its text as ERT so it round-trips.
1787 if (!t.cs().empty()) {
1788 context.check_layout(os);
1789 output_ert_inset(os, '%' + t.cs(), context);
1790 if (p.next_token().cat() == catNewline) {
1791 // A newline after a comment line starts a new
1793 if (context.new_layout_allowed) {
1794 if(!context.atParagraphStart())
1795 // Only start a new paragraph if not already
1796 // done (we might get called recursively)
1797 context.new_paragraph(os);
// Paragraph breaks are not allowed here: emit the newline as ERT.
1799 output_ert_inset(os, "\n", context);
1800 eat_whitespace(p, os, context, true);
1803 // "%\n" combination
1810 * Reads spaces and comments until the first non-space, non-comment token.
1811 * New paragraphs (double newlines or \\par) are handled like simple spaces
1812 * if \p eatParagraph is true.
1813 * Spaces are skipped, but comments are written to \p os.
1815 void eat_whitespace(Parser & p, ostream & os, Context & context,
// Loop over tokens: forward comments to parse_comment(), swallow spaces and
// newlines, and stop at the first token that is neither.
1819 Token const & t = p.get_token();
1820 if (t.cat() == catComment)
1821 parse_comment(p, os, t, context);
1822 else if ((! eatParagraph && p.isParagraph()) ||
1823 (t.cat() != catSpace && t.cat() != catNewline)) {
1832 * Set a font attribute, parse text and reset the font attribute.
1833 * \param attribute Attribute name (e.g. \\family, \\shape etc.)
1834 * \param currentvalue Current value of the attribute. Is set to the new
1835 * value during parsing.
1836 * \param newvalue New value of the attribute
1838 void parse_text_attributes(Parser & p, ostream & os, unsigned flags, bool outer,
1839 Context & context, string const & attribute,
1840 string & currentvalue, string const & newvalue)
1842 context.check_layout(os);
1843 string const oldvalue = currentvalue;
1844 currentvalue = newvalue;
1845 os << '\n' << attribute << ' ' << newvalue << "\n";
1846 parse_text_snippet(p, os, flags, outer, context);
1847 context.check_layout(os);
1848 os << '\n' << attribute << ' ' << oldvalue << "\n";
1849 currentvalue = oldvalue;
1853 /// get the arguments of a natbib or jurabib citation command
/// On return \p before and \p after hold the pre- and post-note texts
/// (including their brackets, so "" and "[]" stay distinguishable).
1854 void get_cite_arguments(Parser & p, bool natbibOrder,
1855 string & before, string & after)
1857 // We need to distinguish "" and "[]", so we can't use p.getOpt().
1859 // text before the citation
1861 // text after the citation
1862 after = p.getFullOpt();
// With two optional arguments the meaning of the first one changes:
// natbib reads [before][after], hence the swap below.
1864 if (!after.empty()) {
1865 before = p.getFullOpt();
1866 if (natbibOrder && !before.empty())
1867 swap(before, after);
1872 /// Convert filenames with TeX macros and/or quotes to something LyX
/// can understand: expands \lyxdot, \space and \string and then strips
/// surrounding double quotes.
1874 string const normalize_filename(string const & name)
// Re-tokenize the name and expand the filename-related TeX macros.
1879 Token const & t = p.get_token();
1880 if (t.cat() != catEscape)
1882 else if (t.cs() == "lyxdot") {
1883 // This is used by LyX for simple dots in relative
1887 } else if (t.cs() == "space") {
1890 } else if (t.cs() == "string") {
1891 // Convert \string" to " and \string~ to ~
1892 Token const & n = p.next_token();
1893 if (n.asInput() != "\"" && n.asInput() != "~")
1898 // Strip quotes. This is a bit complicated (see latex_path()).
1899 string full = os.str();
1900 if (!full.empty() && full[0] == '"') {
// The closing quote may sit before the extension ("file".ext)
// or at the very end ("file.ext").
1901 string base = removeExtension(full);
1902 string ext = getExtension(full);
1903 if (!base.empty() && base[base.length()-1] == '"')
1906 return addExtension(trim(base, "\""), ext);
1907 if (full[full.length()-1] == '"')
1910 return trim(full, "\"");
1916 /// Convert \p name from TeX convention (relative to master file) to LyX
1917 /// convention (relative to .lyx file) if it is relative
1918 void fix_child_filename(string & name)
1920 string const absMasterTeX = getMasterFilePath(true);
1921 bool const isabs = FileName::isAbsolute(name);
1922 // convert from "relative to .tex master" to absolute original path
1924 name = makeAbsPath(name, absMasterTeX).absFileName();
1925 bool copyfile = copyFiles();
1926 string const absParentLyX = getParentFilePath(false);
1929 // convert from absolute original path to "relative to master file"
1930 string const rel = to_utf8(makeRelPath(from_utf8(name),
1931 from_utf8(absMasterTeX)));
1932 // re-interpret "relative to .tex file" as "relative to .lyx file"
1933 // (is different if the master .lyx file resides in a
1934 // different path than the master .tex file)
1935 string const absMasterLyX = getMasterFilePath(false);
1936 abs = makeAbsPath(rel, absMasterLyX).absFileName();
1937 // Do not copy if the new path is impossible to create. Example:
1938 // absMasterTeX = "/foo/bar/"
1939 // absMasterLyX = "/bar/"
1940 // name = "/baz.eps" => new absolute name would be "/../baz.eps"
1941 if (contains(name, "/../"))
1948 // convert from absolute original path to
1949 // "relative to .lyx file"
1950 name = to_utf8(makeRelPath(from_utf8(abs),
1951 from_utf8(absParentLyX)));
1955 // convert from absolute original path to "relative to .lyx file"
1956 name = to_utf8(makeRelPath(from_utf8(name),
1957 from_utf8(absParentLyX)));
/// Copy \p src to \p dstname (interpreted relative to the parent .lyx file
/// if not absolute), creating the destination directory if necessary.
/// Existing files are only overwritten when overwriteFiles() is set;
/// all failures are reported as warnings on stderr.
1962 void copy_file(FileName const & src, string dstname)
1966 string const absParent = getParentFilePath(false);
1968 if (FileName::isAbsolute(dstname))
1969 dst = FileName(dstname);
1971 dst = makeAbsPath(dstname, absParent);
1972 string const absMaster = getMasterFilePath(false);
1973 FileName const srcpath = src.onlyPath();
1974 FileName const dstpath = dst.onlyPath();
// Nothing to do if source and destination directory are the same.
1975 if (equivalent(srcpath, dstpath))
1977 if (!dstpath.isDirectory()) {
1978 if (!dstpath.createPath()) {
1979 cerr << "Warning: Could not create directory for file `"
1980 << dst.absFileName() << "´." << endl;
1984 if (dst.isReadableFile()) {
1985 if (overwriteFiles())
1986 cerr << "Warning: Overwriting existing file `"
1987 << dst.absFileName() << "´." << endl;
1989 cerr << "Warning: Not overwriting existing file `"
1990 << dst.absFileName() << "´." << endl;
1994 if (!src.copyTo(dst))
1995 cerr << "Warning: Could not copy file `" << src.absFileName()
1996 << "´ to `" << dst.absFileName() << "´." << endl;
2000 /// Parse a literate Chunk section. The initial "<<" is already parsed.
/// Returns whether a chunk was recognized and converted to a Flex Chunk
/// inset (with the chunk parameters as "Argument 1" and the code as ERT).
2001 bool parse_chunk(Parser & p, ostream & os, Context & context)
2003 // check whether a chunk is possible here.
2004 if (!context.textclass.hasInsetLayout(from_ascii("Flex:Chunk"))) {
2010 // read the parameters
2011 Parser::Arg const params = p.verbatimStuff(">>=\n", false);
2012 if (!params.first) {
// Read the chunk body, terminated by "@" at the start of a line.
2017 Parser::Arg const code = p.verbatimStuff("\n@");
2022 string const post_chunk = p.verbatimStuff("\n").second + '\n';
2023 if (post_chunk[0] != ' ' && post_chunk[0] != '\n') {
2027 // The last newline read is important for paragraph handling
2031 //cerr << "params=[" << params.second << "], code=[" << code.second << "]" <<endl;
2032 // We must have a valid layout before outputting the Chunk inset.
2033 context.check_layout(os);
// Chunk contents use the plain layout inside the Flex inset.
2034 Context chunkcontext(true, context.textclass);
2035 chunkcontext.layout = &context.textclass.plainLayout();
2036 begin_inset(os, "Flex Chunk");
2037 os << "\nstatus open\n";
2038 if (!params.second.empty()) {
2039 chunkcontext.check_layout(os);
2040 Context paramscontext(true, context.textclass);
2041 paramscontext.layout = &context.textclass.plainLayout();
2042 begin_inset(os, "Argument 1");
2043 os << "\nstatus open\n";
2044 output_ert(os, params.second, paramscontext);
2047 output_ert(os, code.second, chunkcontext);
2055 /// detects \\def, \\long\\def and \\global\\long\\def with ws and comments
/// Performs lookahead only: consumed tokens are pushed back before
/// returning, so the parser position is unchanged for the caller.
2056 bool is_macro(Parser & p)
2058 Token first = p.curr_token();
2059 if (first.cat() != catEscape || !p.good())
2061 if (first.cs() == "def")
2063 if (first.cs() != "global" && first.cs() != "long")
// Skip whitespace/comments between the prefix and the next control sequence.
2065 Token second = p.get_token();
2067 while (p.good() && !p.isParagraph() && (second.cat() == catSpace ||
2068 second.cat() == catNewline || second.cat() == catComment)) {
2069 second = p.get_token();
2072 bool secondvalid = second.cat() == catEscape;
2074 bool thirdvalid = false;
// Only \global\long\def needs a third control sequence.
2075 if (p.good() && first.cs() == "global" && secondvalid &&
2076 second.cs() == "long") {
2077 third = p.get_token();
2079 while (p.good() && !p.isParagraph() &&
2080 (third.cat() == catSpace ||
2081 third.cat() == catNewline ||
2082 third.cat() == catComment)) {
2083 third = p.get_token();
2086 thirdvalid = third.cat() == catEscape;
// Push the looked-ahead tokens back so the parser state is untouched.
2088 for (int i = 0; i < pos; ++i)
2093 return (first.cs() == "global" || first.cs() == "long") &&
2094 second.cs() == "def";
2095 return first.cs() == "global" && second.cs() == "long" &&
2096 third.cs() == "def";
2100 /// Parse a macro definition (assumes that is_macro() returned true)
/// Simple definitions with consecutively numbered parameters become a
/// FormulaMacro inset; anything more complex is preserved as ERT.
2101 void parse_macro(Parser & p, ostream & os, Context & context)
2103 context.check_layout(os);
2104 Token first = p.curr_token();
// Collect the full prefix (\def, \long\def or \global\long\def) verbatim.
2107 string command = first.asInput();
2108 if (first.cs() != "def") {
2110 eat_whitespace(p, os, context, false);
2111 second = p.curr_token();
2112 command += second.asInput();
2113 if (second.cs() != "def") {
2115 eat_whitespace(p, os, context, false);
2116 third = p.curr_token();
2117 command += third.asInput();
2120 eat_whitespace(p, os, context, false);
2121 string const name = p.get_token().cs();
2122 eat_whitespace(p, os, context, false);
// Scan the parameter text up to the opening brace of the body, checking
// that the parameters are #1, #2, ... in order (a "simple" macro).
2128 while (p.next_token().cat() != catBegin) {
2129 if (p.next_token().cat() == catParameter) {
2134 // followed by number?
2135 if (p.next_token().cat() == catOther) {
2136 string s = p.get_token().asInput();
2138 // number = current arity + 1?
2139 if (s.size() == 1 && s[0] == arity + '0' + 1)
2144 paramtext += p.get_token().cs();
2146 paramtext += p.get_token().cs();
2151 // only output simple (i.e. compatible) macro as FormulaMacros
2152 string ert = '\\' + name + ' ' + paramtext + '{' + p.verbatim_item() + '}';
2154 context.check_layout(os);
2155 begin_inset(os, "FormulaMacro");
2156 os << "\n\\def" << ert;
// Fallback: keep the whole definition as ERT.
2159 output_ert_inset(os, command + ert, context);
2163 void registerExternalTemplatePackages(string const & name)
// Registers (in the preamble) every LaTeX package that the named external
// inset template will load automatically, so that tex2lyx's package
// bookkeeping matches the template's real requirements.
// NOTE(review): this is an elided extraction — the guard for a missing
// template (null 'et') and the early returns implied by the line-number gaps
// are not visible here; TODO confirm against the full file.
2165 	external::TemplateManager const & etm = external::TemplateManager::get();
2166 	external::Template const * const et = etm.getTemplateByName(name);
// Prefer the PDFLaTeX output format of the template ...
2169 	external::Template::Formats::const_iterator cit = et->formats.end();
2171 	cit = et->formats.find("PDFLaTeX");
2172 	if (cit == et->formats.end())
2173 		// If the template has not specified a PDFLaTeX output,
2174 		// we try the LaTeX format.
2175 		cit = et->formats.find("LaTeX");
// Neither format present: nothing to register (the 'return' itself is among
// the elided lines — presumably at original line 2177).
2176 	if (cit == et->formats.end())
// Register each package listed in the chosen format's requirements.
2178 	vector<string>::const_iterator qit = cit->second.requirements.begin();
2179 	vector<string>::const_iterator qend = cit->second.requirements.end();
2180 	for (; qit != qend; ++qit)
2181 		preamble.registerAutomaticallyLoadedPackage(*qit);
2184 } // anonymous namespace
2187 void parse_text(Parser & p, ostream & os, unsigned flags, bool outer,
2190 Layout const * newlayout = 0;
2191 InsetLayout const * newinsetlayout = 0;
2192 char const * const * where = 0;
2193 // Store the latest bibliographystyle, addcontentslineContent and
2194 // nocite{*} option (needed for bibtex inset)
2196 string contentslineContent;
2197 string bibliographystyle = "default";
2198 bool const use_natbib = isProvided("natbib");
2199 bool const use_jurabib = isProvided("jurabib");
2202 // it is impossible to determine the correct encoding for non-CJK Japanese.
2203 // Therefore write a note at the beginning of the document
2204 if (is_nonCJKJapanese) {
2205 context.check_layout(os);
2206 begin_inset(os, "Note Note\n");
2207 os << "status open\n\\begin_layout Plain Layout\n"
2208 << "\\series bold\n"
2209 << "Important information:\n"
2210 << "\\end_layout\n\n"
2211 << "\\begin_layout Plain Layout\n"
2212 << "The original LaTeX source for this document is in Japanese (pLaTeX).\n"
2213 << " It was therefore impossible for tex2lyx to determine the correct encoding.\n"
2214 << " The iconv encoding " << p.getEncoding() << " was used.\n"
2215 << " If this is incorrect, you must run the tex2lyx program on the command line\n"
2216 << " and specify the encoding using the -e command-line switch.\n"
2217 << " In addition, you might want to double check that the desired output encoding\n"
2218 << " is correctly selected in Document > Settings > Language.\n"
2219 << "\\end_layout\n";
2221 is_nonCJKJapanese = false;
2225 Token const & t = p.get_token();
2227 debugToken(cerr, t, flags);
2230 if (flags & FLAG_ITEM) {
2231 if (t.cat() == catSpace)
2234 flags &= ~FLAG_ITEM;
2235 if (t.cat() == catBegin) {
2236 // skip the brace and collect everything to the next matching
2238 flags |= FLAG_BRACE_LAST;
2242 // handle only this single token, leave the loop if done
2243 flags |= FLAG_LEAVE;
2246 if (t.cat() != catEscape && t.character() == ']' &&
2247 (flags & FLAG_BRACK_LAST))
2249 if (t.cat() == catEnd && (flags & FLAG_BRACE_LAST))
2252 // If there is anything between \end{env} and \begin{env} we
2253 // don't need to output a separator.
2254 if (t.cat() != catSpace && t.cat() != catNewline &&
2255 t.asInput() != "\\begin")
2261 bool const starred = p.next_token().asInput() == "*";
2262 string const starredname(starred ? (t.cs() + '*') : t.cs());
2263 if (t.cat() == catMath) {
2264 // we are inside some text mode thingy, so opening new math is allowed
2265 context.check_layout(os);
2266 begin_inset(os, "Formula ");
2267 Token const & n = p.get_token();
2268 bool const display(n.cat() == catMath && outer);
2270 // TeX's $$...$$ syntax for displayed math
2272 parse_math(p, os, FLAG_SIMPLE, MATH_MODE);
2274 p.get_token(); // skip the second '$' token
2276 // simple $...$ stuff
2279 parse_math(p, os, FLAG_SIMPLE, MATH_MODE);
2284 // Prevent the conversion of a line break to a
2285 // space (bug 7668). This does not change the
2286 // output, but looks ugly in LyX.
2287 eat_whitespace(p, os, context, false);
2291 else if (t.cat() == catSuper || t.cat() == catSub)
2292 cerr << "catcode " << t << " illegal in text mode\n";
2294 // Basic support for english quotes. This should be
2295 // extended to other quotes, but is not so easy (a
2296 // left english quote is the same as a right german
2298 else if (t.asInput() == "`" && p.next_token().asInput() == "`") {
2299 context.check_layout(os);
2300 begin_inset(os, "Quotes ");
2306 else if (t.asInput() == "'" && p.next_token().asInput() == "'") {
2307 context.check_layout(os);
2308 begin_inset(os, "Quotes ");
2315 else if (t.asInput() == ">" && p.next_token().asInput() == ">") {
2316 context.check_layout(os);
2317 begin_inset(os, "Quotes ");
2324 else if (t.asInput() == "<"
2325 && p.next_token().asInput() == "<") {
2326 bool has_chunk = false;
2330 has_chunk = parse_chunk(p, os, context);
2336 context.check_layout(os);
2337 begin_inset(os, "Quotes ");
2338 //FIXME: this is a right danish quote;
2339 // why not a left french quote?
2347 else if (t.cat() == catSpace || (t.cat() == catNewline && ! p.isParagraph()))
2348 check_space(p, os, context);
2350 else if (t.character() == '[' && noweb_mode &&
2351 p.next_token().character() == '[') {
2352 // These can contain underscores
2354 string const s = p.getFullOpt() + ']';
2355 if (p.next_token().character() == ']')
2358 cerr << "Warning: Inserting missing ']' in '"
2359 << s << "'." << endl;
2360 output_ert_inset(os, s, context);
2363 else if (t.cat() == catLetter) {
2364 context.check_layout(os);
2368 else if (t.cat() == catOther ||
2369 t.cat() == catAlign ||
2370 t.cat() == catParameter) {
2371 context.check_layout(os);
2372 if (t.asInput() == "-" && p.next_token().asInput() == "-" &&
2373 context.merging_hyphens_allowed &&
2374 context.font.family != "ttfamily" &&
2375 !context.layout->pass_thru) {
2376 if (p.next_next_token().asInput() == "-") {
2378 os << to_utf8(docstring(1, 0x2014));
2382 os << to_utf8(docstring(1, 0x2013));
2385 // This translates "&" to "\\&" which may be wrong...
2389 else if (p.isParagraph()) {
2390 if (context.new_layout_allowed)
2391 context.new_paragraph(os);
2393 output_ert_inset(os, "\\par ", context);
2394 eat_whitespace(p, os, context, true);
2397 else if (t.cat() == catActive) {
2398 context.check_layout(os);
2399 if (t.character() == '~') {
2400 if (context.layout->free_spacing)
2403 begin_inset(os, "space ~\n");
2410 else if (t.cat() == catBegin) {
2411 Token const next = p.next_token();
2412 Token const end = p.next_next_token();
2413 if (next.cat() == catEnd) {
2415 Token const prev = p.prev_token();
2417 if (p.next_token().character() == '`')
2418 ; // ignore it in {}``
2420 output_ert_inset(os, "{}", context);
2421 } else if (next.cat() == catEscape &&
2422 is_known(next.cs(), known_quotes) &&
2423 end.cat() == catEnd) {
2424 // Something like {\textquoteright} (e.g.
2425 // from writer2latex). LyX writes
2426 // \textquoteright{}, so we may skip the
2427 // braces here for better readability.
2428 parse_text_snippet(p, os, FLAG_BRACE_LAST,
2430 } else if (p.next_token().asInput() == "\\ascii") {
2431 // handle the \ascii characters
2432 // (the case without braces is handled later)
2433 // the code is "{\ascii\xxx}"
2434 p.get_token(); // eat \ascii
2435 string name2 = p.get_token().asInput();
2436 p.get_token(); // eat the final '}'
2437 string const name = "{\\ascii" + name2 + "}";
2441 // get the character from unicodesymbols
2442 docstring s = encodings.fromLaTeXCommand(from_utf8(name),
2443 Encodings::TEXT_CMD, termination, rem, &req);
2445 context.check_layout(os);
2448 output_ert_inset(os,
2449 to_utf8(rem), context);
2450 for (set<string>::const_iterator it = req.begin();
2451 it != req.end(); ++it)
2452 preamble.registerAutomaticallyLoadedPackage(*it);
2454 // we did not find a non-ert version
2455 output_ert_inset(os, name, context);
2457 context.check_layout(os);
2458 // special handling of font attribute changes
2459 Token const prev = p.prev_token();
2460 TeXFont const oldFont = context.font;
2461 if (next.character() == '[' ||
2462 next.character() == ']' ||
2463 next.character() == '*') {
2465 if (p.next_token().cat() == catEnd) {
2470 output_ert_inset(os, "{", context);
2471 parse_text_snippet(p, os,
2474 output_ert_inset(os, "}", context);
2476 } else if (! context.new_layout_allowed) {
2477 output_ert_inset(os, "{", context);
2478 parse_text_snippet(p, os, FLAG_BRACE_LAST,
2480 output_ert_inset(os, "}", context);
2481 } else if (is_known(next.cs(), known_sizes)) {
2482 // next will change the size, so we must
2484 parse_text_snippet(p, os, FLAG_BRACE_LAST,
2486 if (!context.atParagraphStart())
2488 << context.font.size << "\n";
2489 } else if (is_known(next.cs(), known_font_families)) {
2490 // next will change the font family, so we
2491 // must reset it here
2492 parse_text_snippet(p, os, FLAG_BRACE_LAST,
2494 if (!context.atParagraphStart())
2496 << context.font.family << "\n";
2497 } else if (is_known(next.cs(), known_font_series)) {
2498 // next will change the font series, so we
2499 // must reset it here
2500 parse_text_snippet(p, os, FLAG_BRACE_LAST,
2502 if (!context.atParagraphStart())
2504 << context.font.series << "\n";
2505 } else if (is_known(next.cs(), known_font_shapes)) {
2506 // next will change the font shape, so we
2507 // must reset it here
2508 parse_text_snippet(p, os, FLAG_BRACE_LAST,
2510 if (!context.atParagraphStart())
2512 << context.font.shape << "\n";
2513 } else if (is_known(next.cs(), known_old_font_families) ||
2514 is_known(next.cs(), known_old_font_series) ||
2515 is_known(next.cs(), known_old_font_shapes)) {
2516 // next will change the font family, series
2517 // and shape, so we must reset it here
2518 parse_text_snippet(p, os, FLAG_BRACE_LAST,
2520 if (!context.atParagraphStart())
2522 << context.font.family
2524 << context.font.series
2526 << context.font.shape << "\n";
2528 output_ert_inset(os, "{", context);
2529 parse_text_snippet(p, os, FLAG_BRACE_LAST,
2531 output_ert_inset(os, "}", context);
2536 else if (t.cat() == catEnd) {
2537 if (flags & FLAG_BRACE_LAST) {
2540 cerr << "stray '}' in text\n";
2541 output_ert_inset(os, "}", context);
2544 else if (t.cat() == catComment)
2545 parse_comment(p, os, t, context);
2548 // control sequences
2551 else if (t.cs() == "(" || t.cs() == "[") {
2552 bool const simple = t.cs() == "(";
2553 context.check_layout(os);
2554 begin_inset(os, "Formula");
2555 os << " \\" << t.cs();
2556 parse_math(p, os, simple ? FLAG_SIMPLE2 : FLAG_EQUATION, MATH_MODE);
2557 os << '\\' << (simple ? ')' : ']');
2560 // Prevent the conversion of a line break to a
2561 // space (bug 7668). This does not change the
2562 // output, but looks ugly in LyX.
2563 eat_whitespace(p, os, context, false);
2567 else if (t.cs() == "begin")
2568 parse_environment(p, os, outer, last_env,
2571 else if (t.cs() == "end") {
2572 if (flags & FLAG_END) {
2573 // eat environment name
2574 string const name = p.getArg('{', '}');
2575 if (name != active_environment())
2576 cerr << "\\end{" + name + "} does not match \\begin{"
2577 + active_environment() + "}\n";
2580 p.error("found 'end' unexpectedly");
2583 else if (t.cs() == "item") {
2585 bool const optarg = p.hasOpt();
2587 // FIXME: This swallows comments, but we cannot use
2588 // eat_whitespace() since we must not output
2589 // anything before the item.
2590 p.skip_spaces(true);
2591 s = p.verbatimOption();
2593 p.skip_spaces(false);
2595 context.check_layout(os);
2596 if (context.has_item) {
2597 // An item in an unknown list-like environment
2598 // FIXME: Do this in check_layout()!
2599 context.has_item = false;
2601 output_ert_inset(os, "\\item", context);
2603 output_ert_inset(os, "\\item ", context);
2606 if (context.layout->labeltype != LABEL_MANUAL) {
2607 // handle option of itemize item
2608 begin_inset(os, "Argument item:1\n");
2609 os << "status open\n";
2610 os << "\n\\begin_layout Plain Layout\n";
2612 os << parse_text_snippet(p2,
2613 FLAG_BRACK_LAST, outer, context);
2614 // we must not use context.check_end_layout(os)
2615 // because that would close the outer itemize layout
2616 os << "\n\\end_layout\n";
2618 eat_whitespace(p, os, context, false);
2619 } else if (!s.empty()) {
2620 // LyX adds braces around the argument,
2621 // so we need to remove them here.
2622 if (s.size() > 2 && s[0] == '{' &&
2623 s[s.size()-1] == '}')
2624 s = s.substr(1, s.size()-2);
2625 // If the argument contains a space we
2626 // must put it into ERT: Otherwise LyX
2627 // would misinterpret the space as
2628 // item delimiter (bug 7663)
2629 if (contains(s, ' ')) {
2630 output_ert_inset(os, s, context);
2633 os << parse_text_snippet(p2,
2634 FLAG_BRACK_LAST, outer, context);
2636 // The space is needed to separate the
2637 // item from the rest of the sentence.
2639 eat_whitespace(p, os, context, false);
2644 else if (t.cs() == "bibitem") {
2646 context.check_layout(os);
2647 eat_whitespace(p, os, context, false);
2648 string label = convert_command_inset_arg(p.verbatimOption());
2649 string key = convert_command_inset_arg(p.verbatim_item());
2650 if (contains(label, '\\') || contains(key, '\\')) {
2651 // LyX can't handle LaTeX commands in labels or keys
2652 output_ert_inset(os, t.asInput() + '[' + label +
2653 "]{" + p.verbatim_item() + '}',
2656 begin_command_inset(os, "bibitem", "bibitem");
2657 os << "label \"" << label << "\"\n"
2658 "key \"" << key << "\"\n";
2663 else if (is_macro(p)) {
2664 // catch the case of \def\inputGnumericTable
2666 if (t.cs() == "def") {
2667 Token second = p.next_token();
2668 if (second.cs() == "inputGnumericTable") {
2672 Token third = p.get_token();
2674 if (third.cs() == "input") {
2678 string name = normalize_filename(p.verbatim_item());
2679 string const path = getMasterFilePath(true);
2680 // We want to preserve relative / absolute filenames,
2681 // therefore path is only used for testing
2682 // The file extension is in every case ".tex".
2683 // So we need to remove this extension and check for
2684 // the original one.
2685 name = removeExtension(name);
2686 if (!makeAbsPath(name, path).exists()) {
2687 char const * const Gnumeric_formats[] = {"gnumeric",
2689 string const Gnumeric_name =
2690 find_file(name, path, Gnumeric_formats);
2691 if (!Gnumeric_name.empty())
2692 name = Gnumeric_name;
2694 FileName const absname = makeAbsPath(name, path);
2695 if (absname.exists()) {
2696 fix_child_filename(name);
2697 copy_file(absname, name);
2699 cerr << "Warning: Could not find file '"
2700 << name << "'." << endl;
2701 context.check_layout(os);
2702 begin_inset(os, "External\n\ttemplate ");
2703 os << "GnumericSpreadsheet\n\tfilename "
2706 context.check_layout(os);
2708 // register the packages that are automatically loaded
2709 // by the Gnumeric template
2710 registerExternalTemplatePackages("GnumericSpreadsheet");
2715 parse_macro(p, os, context);
2718 else if (t.cs() == "noindent") {
2720 context.add_par_extra_stuff("\\noindent\n");
2723 else if (t.cs() == "appendix") {
2724 context.add_par_extra_stuff("\\start_of_appendix\n");
2725 // We need to start a new paragraph. Otherwise the
2726 // appendix in 'bla\appendix\chapter{' would start
2728 context.new_paragraph(os);
2729 // We need to make sure that the paragraph is
2730 // generated even if it is empty. Otherwise the
2731 // appendix in '\par\appendix\par\chapter{' would
2733 context.check_layout(os);
2734 // FIXME: This is a hack to prevent paragraph
2735 // deletion if it is empty. Handle this better!
2736 output_ert_inset(os,
2737 "%dummy comment inserted by tex2lyx to "
2738 "ensure that this paragraph is not empty",
2740 // Both measures above may generate an additional
2741 // empty paragraph, but that does not hurt, because
2742 // whitespace does not matter here.
2743 eat_whitespace(p, os, context, true);
2746 // Must catch empty dates before findLayout is called below
2747 else if (t.cs() == "date") {
2748 eat_whitespace(p, os, context, false);
2750 string const date = p.verbatim_item();
2753 preamble.suppressDate(true);
2756 preamble.suppressDate(false);
2757 if (context.new_layout_allowed &&
2758 (newlayout = findLayout(context.textclass,
2761 output_command_layout(os, p, outer,
2762 context, newlayout);
2763 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
2764 if (!preamble.titleLayoutFound())
2765 preamble.titleLayoutFound(newlayout->intitle);
2766 set<string> const & req = newlayout->requires();
2767 set<string>::const_iterator it = req.begin();
2768 set<string>::const_iterator en = req.end();
2769 for (; it != en; ++it)
2770 preamble.registerAutomaticallyLoadedPackage(*it);
2772 output_ert_inset(os,
2773 "\\date{" + p.verbatim_item() + '}',
2778 // Starred section headings
2779 // Must attempt to parse "Section*" before "Section".
2780 else if ((p.next_token().asInput() == "*") &&
2781 context.new_layout_allowed &&
2782 (newlayout = findLayout(context.textclass, t.cs() + '*', true))) {
2785 output_command_layout(os, p, outer, context, newlayout);
2787 if (!preamble.titleLayoutFound())
2788 preamble.titleLayoutFound(newlayout->intitle);
2789 set<string> const & req = newlayout->requires();
2790 for (set<string>::const_iterator it = req.begin(); it != req.end(); ++it)
2791 preamble.registerAutomaticallyLoadedPackage(*it);
2794 // Section headings and the like
2795 else if (context.new_layout_allowed &&
2796 (newlayout = findLayout(context.textclass, t.cs(), true))) {
2798 output_command_layout(os, p, outer, context, newlayout);
2800 if (!preamble.titleLayoutFound())
2801 preamble.titleLayoutFound(newlayout->intitle);
2802 set<string> const & req = newlayout->requires();
2803 for (set<string>::const_iterator it = req.begin(); it != req.end(); ++it)
2804 preamble.registerAutomaticallyLoadedPackage(*it);
2807 else if (t.cs() == "subfloat") {
2808 // the syntax is \subfloat[list entry][sub caption]{content}
2809 // if it is a table of figure depends on the surrounding float
2810 // FIXME: second optional argument is not parsed
2811 bool has_caption = false;
2813 // do nothing if there is no outer float
2814 if (!float_type.empty()) {
2815 context.check_layout(os);
2817 begin_inset(os, "Float " + float_type + "\n");
2819 << "\nsideways false"
2820 << "\nstatus collapsed\n\n";
2823 if (p.next_token().cat() != catEscape &&
2824 p.next_token().character() == '[') {
2825 p.get_token(); // eat '['
2826 caption = parse_text_snippet(p, FLAG_BRACK_LAST, outer, context);
2830 parse_text_in_inset(p, os, FLAG_ITEM, outer, context);
2831 // the caption comes always as the last
2833 // we must make sure that the caption gets a \begin_layout
2834 os << "\n\\begin_layout Plain Layout";
2836 begin_inset(os, "Caption Standard\n");
2837 Context newcontext(true, context.textclass,
2838 0, 0, context.font);
2839 newcontext.check_layout(os);
2840 os << caption << "\n";
2841 newcontext.check_end_layout(os);
2842 // We don't need really a new paragraph, but
2843 // we must make sure that the next item gets a \begin_layout.
2844 //newcontext.new_paragraph(os);
2848 // We don't need really a new paragraph, but
2849 // we must make sure that the next item gets a \begin_layout.
2851 context.new_paragraph(os);
2854 context.check_end_layout(os);
2855 // close the layout we opened
2857 os << "\n\\end_layout\n";
2859 // if the float type is not supported or there is no surrounding float
2862 string opt_arg = convert_command_inset_arg(p.getArg('[', ']'));
2863 output_ert_inset(os, t.asInput() + '[' + opt_arg +
2864 "]{" + p.verbatim_item() + '}', context);
2866 output_ert_inset(os, t.asInput() + "{" + p.verbatim_item() + '}', context);
2870 else if (t.cs() == "includegraphics") {
2871 bool const clip = p.next_token().asInput() == "*";
2874 string const arg = p.getArg('[', ']');
2875 map<string, string> opts;
2876 vector<string> keys;
2877 split_map(arg, opts, keys);
2879 opts["clip"] = string();
2880 string name = normalize_filename(p.verbatim_item());
2882 string const path = getMasterFilePath(true);
2883 // We want to preserve relative / absolute filenames,
2884 // therefore path is only used for testing
2885 if (!makeAbsPath(name, path).exists()) {
2886 // The file extension is probably missing.
2887 // Now try to find it out.
2888 string const dvips_name =
2889 find_file(name, path,
2890 known_dvips_graphics_formats);
2891 string const pdftex_name =
2892 find_file(name, path,
2893 known_pdftex_graphics_formats);
2894 if (!dvips_name.empty()) {
2895 if (!pdftex_name.empty()) {
2896 cerr << "This file contains the "
2898 "\"\\includegraphics{"
2900 "However, files\n\""
2901 << dvips_name << "\" and\n\""
2902 << pdftex_name << "\"\n"
2903 "both exist, so I had to make a "
2904 "choice and took the first one.\n"
2905 "Please move the unwanted one "
2906 "someplace else and try again\n"
2907 "if my choice was wrong."
2911 } else if (!pdftex_name.empty()) {
2917 FileName const absname = makeAbsPath(name, path);
2918 if (absname.exists()) {
2919 fix_child_filename(name);
2920 copy_file(absname, name);
2922 cerr << "Warning: Could not find graphics file '"
2923 << name << "'." << endl;
2925 context.check_layout(os);
2926 begin_inset(os, "Graphics ");
2927 os << "\n\tfilename " << name << '\n';
2928 if (opts.find("width") != opts.end())
2930 << translate_len(opts["width"]) << '\n';
2931 if (opts.find("height") != opts.end())
2933 << translate_len(opts["height"]) << '\n';
2934 if (opts.find("scale") != opts.end()) {
2935 istringstream iss(opts["scale"]);
2939 os << "\tscale " << val << '\n';
2941 if (opts.find("angle") != opts.end()) {
2942 os << "\trotateAngle "
2943 << opts["angle"] << '\n';
2944 vector<string>::const_iterator a =
2945 find(keys.begin(), keys.end(), "angle");
2946 vector<string>::const_iterator s =
2947 find(keys.begin(), keys.end(), "width");
2948 if (s == keys.end())
2949 s = find(keys.begin(), keys.end(), "height");
2950 if (s == keys.end())
2951 s = find(keys.begin(), keys.end(), "scale");
2952 if (s != keys.end() && distance(s, a) > 0)
2953 os << "\tscaleBeforeRotation\n";
2955 if (opts.find("origin") != opts.end()) {
2957 string const opt = opts["origin"];
2958 if (opt.find('l') != string::npos) ss << "left";
2959 if (opt.find('r') != string::npos) ss << "right";
2960 if (opt.find('c') != string::npos) ss << "center";
2961 if (opt.find('t') != string::npos) ss << "Top";
2962 if (opt.find('b') != string::npos) ss << "Bottom";
2963 if (opt.find('B') != string::npos) ss << "Baseline";
2964 if (!ss.str().empty())
2965 os << "\trotateOrigin " << ss.str() << '\n';
2967 cerr << "Warning: Ignoring unknown includegraphics origin argument '" << opt << "'\n";
2969 if (opts.find("keepaspectratio") != opts.end())
2970 os << "\tkeepAspectRatio\n";
2971 if (opts.find("clip") != opts.end())
2973 if (opts.find("draft") != opts.end())
2975 if (opts.find("bb") != opts.end())
2976 os << "\tBoundingBox "
2977 << opts["bb"] << '\n';
2978 int numberOfbbOptions = 0;
2979 if (opts.find("bbllx") != opts.end())
2980 numberOfbbOptions++;
2981 if (opts.find("bblly") != opts.end())
2982 numberOfbbOptions++;
2983 if (opts.find("bburx") != opts.end())
2984 numberOfbbOptions++;
2985 if (opts.find("bbury") != opts.end())
2986 numberOfbbOptions++;
2987 if (numberOfbbOptions == 4)
2988 os << "\tBoundingBox "
2989 << opts["bbllx"] << " " << opts["bblly"] << " "
2990 << opts["bburx"] << " " << opts["bbury"] << '\n';
2991 else if (numberOfbbOptions > 0)
2992 cerr << "Warning: Ignoring incomplete includegraphics boundingbox arguments.\n";
2993 numberOfbbOptions = 0;
2994 if (opts.find("natwidth") != opts.end())
2995 numberOfbbOptions++;
2996 if (opts.find("natheight") != opts.end())
2997 numberOfbbOptions++;
2998 if (numberOfbbOptions == 2)
2999 os << "\tBoundingBox 0bp 0bp "
3000 << opts["natwidth"] << " " << opts["natheight"] << '\n';
3001 else if (numberOfbbOptions > 0)
3002 cerr << "Warning: Ignoring incomplete includegraphics boundingbox arguments.\n";
3003 ostringstream special;
3004 if (opts.find("hiresbb") != opts.end())
3005 special << "hiresbb,";
3006 if (opts.find("trim") != opts.end())
3008 if (opts.find("viewport") != opts.end())
3009 special << "viewport=" << opts["viewport"] << ',';
3010 if (opts.find("totalheight") != opts.end())
3011 special << "totalheight=" << opts["totalheight"] << ',';
3012 if (opts.find("type") != opts.end())
3013 special << "type=" << opts["type"] << ',';
3014 if (opts.find("ext") != opts.end())
3015 special << "ext=" << opts["ext"] << ',';
3016 if (opts.find("read") != opts.end())
3017 special << "read=" << opts["read"] << ',';
3018 if (opts.find("command") != opts.end())
3019 special << "command=" << opts["command"] << ',';
3020 string s_special = special.str();
3021 if (!s_special.empty()) {
3022 // We had special arguments. Remove the trailing ','.
3023 os << "\tspecial " << s_special.substr(0, s_special.size() - 1) << '\n';
3025 // TODO: Handle the unknown settings better.
3026 // Warn about invalid options.
3027 // Check whether some option was given twice.
3029 preamble.registerAutomaticallyLoadedPackage("graphicx");
3032 else if (t.cs() == "footnote" ||
3033 (t.cs() == "thanks" && context.layout->intitle)) {
3035 context.check_layout(os);
3036 begin_inset(os, "Foot\n");
3037 os << "status collapsed\n\n";
3038 parse_text_in_inset(p, os, FLAG_ITEM, false, context);
3042 else if (t.cs() == "marginpar") {
3044 context.check_layout(os);
3045 begin_inset(os, "Marginal\n");
3046 os << "status collapsed\n\n";
3047 parse_text_in_inset(p, os, FLAG_ITEM, false, context);
3051 else if (t.cs() == "lstinline") {
3053 parse_listings(p, os, context, true);
3056 else if (t.cs() == "ensuremath") {
3058 context.check_layout(os);
3059 string const s = p.verbatim_item();
3060 //FIXME: this never triggers in UTF8
3061 if (s == "\xb1" || s == "\xb3" || s == "\xb2" || s == "\xb5")
3064 output_ert_inset(os, "\\ensuremath{" + s + "}",
3068 else if (t.cs() == "makeindex" || t.cs() == "maketitle") {
3069 if (preamble.titleLayoutFound()) {
3071 skip_spaces_braces(p);
3073 output_ert_inset(os, t.asInput(), context);
3076 else if (t.cs() == "tableofcontents" || t.cs() == "lstlistoflistings") {
3077 context.check_layout(os);
3078 begin_command_inset(os, "toc", t.cs());
3080 skip_spaces_braces(p);
3081 if (t.cs() == "lstlistoflistings")
3082 preamble.registerAutomaticallyLoadedPackage("listings");
3085 else if (t.cs() == "listoffigures" || t.cs() == "listoftables") {
3086 context.check_layout(os);
3087 if (t.cs() == "listoffigures")
3088 begin_inset(os, "FloatList figure\n");
3090 begin_inset(os, "FloatList table\n");
3092 skip_spaces_braces(p);
3095 else if (t.cs() == "listof") {
3096 p.skip_spaces(true);
3097 string const name = p.get_token().cs();
3098 if (context.textclass.floats().typeExist(name)) {
3099 context.check_layout(os);
3100 begin_inset(os, "FloatList ");
3103 p.get_token(); // swallow second arg
3105 output_ert_inset(os, "\\listof{" + name + "}", context);
3108 else if ((where = is_known(t.cs(), known_text_font_families)))
3109 parse_text_attributes(p, os, FLAG_ITEM, outer,
3110 context, "\\family", context.font.family,
3111 known_coded_font_families[where - known_text_font_families]);
3113 else if ((where = is_known(t.cs(), known_text_font_series)))
3114 parse_text_attributes(p, os, FLAG_ITEM, outer,
3115 context, "\\series", context.font.series,
3116 known_coded_font_series[where - known_text_font_series]);
3118 else if ((where = is_known(t.cs(), known_text_font_shapes)))
3119 parse_text_attributes(p, os, FLAG_ITEM, outer,
3120 context, "\\shape", context.font.shape,
3121 known_coded_font_shapes[where - known_text_font_shapes]);
3123 else if (t.cs() == "textnormal" || t.cs() == "normalfont") {
3124 context.check_layout(os);
3125 TeXFont oldFont = context.font;
3126 context.font.init();
3127 context.font.size = oldFont.size;
3128 os << "\n\\family " << context.font.family << "\n";
3129 os << "\n\\series " << context.font.series << "\n";
3130 os << "\n\\shape " << context.font.shape << "\n";
3131 if (t.cs() == "textnormal") {
3132 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
3133 output_font_change(os, context.font, oldFont);
3134 context.font = oldFont;
3136 eat_whitespace(p, os, context, false);
3139 else if (t.cs() == "textcolor") {
3140 // scheme is \textcolor{color name}{text}
3141 string const color = p.verbatim_item();
3142 // we only support the predefined colors of the color package
3143 if (color == "black" || color == "blue" || color == "cyan"
3144 || color == "green" || color == "magenta" || color == "red"
3145 || color == "white" || color == "yellow") {
3146 context.check_layout(os);
3147 os << "\n\\color " << color << "\n";
3148 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
3149 context.check_layout(os);
3150 os << "\n\\color inherit\n";
3151 preamble.registerAutomaticallyLoadedPackage("color");
3153 // for custom defined colors
3154 output_ert_inset(os, t.asInput() + "{" + color + "}", context);
3157 else if (t.cs() == "underbar" || t.cs() == "uline") {
3158 // \underbar is not 100% correct (LyX outputs \uline
3159 // of ulem.sty). The difference is that \ulem allows
3160 // line breaks, and \underbar does not.
3161 // Do NOT handle \underline.
3162 // \underbar cuts through y, g, q, p etc.,
3163 // \underline does not.
3164 context.check_layout(os);
3165 os << "\n\\bar under\n";
3166 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
3167 context.check_layout(os);
3168 os << "\n\\bar default\n";
3169 preamble.registerAutomaticallyLoadedPackage("ulem");
3172 else if (t.cs() == "sout") {
3173 context.check_layout(os);
3174 os << "\n\\strikeout on\n";
3175 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
3176 context.check_layout(os);
3177 os << "\n\\strikeout default\n";
3178 preamble.registerAutomaticallyLoadedPackage("ulem");
3181 else if (t.cs() == "uuline" || t.cs() == "uwave" ||
3182 t.cs() == "emph" || t.cs() == "noun") {
3183 context.check_layout(os);
3184 os << "\n\\" << t.cs() << " on\n";
3185 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
3186 context.check_layout(os);
3187 os << "\n\\" << t.cs() << " default\n";
3188 if (t.cs() == "uuline" || t.cs() == "uwave")
3189 preamble.registerAutomaticallyLoadedPackage("ulem");
3192 else if (t.cs() == "lyxadded" || t.cs() == "lyxdeleted") {
3193 context.check_layout(os);
3194 string name = p.getArg('{', '}');
3195 string localtime = p.getArg('{', '}');
3196 preamble.registerAuthor(name);
3197 Author const & author = preamble.getAuthor(name);
3198 // from_asctime_utc() will fail if LyX decides to output the
3199 // time in the text language.
3200 time_t ptime = from_asctime_utc(localtime);
3201 if (ptime == static_cast<time_t>(-1)) {
3202 cerr << "Warning: Could not parse time `" << localtime
3203 << "´ for change tracking, using current time instead.\n";
3204 ptime = current_time();
3206 if (t.cs() == "lyxadded")
3207 os << "\n\\change_inserted ";
3209 os << "\n\\change_deleted ";
3210 os << author.bufferId() << ' ' << ptime << '\n';
3211 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
3212 bool dvipost = LaTeXPackages::isAvailable("dvipost");
3213 bool xcolorulem = LaTeXPackages::isAvailable("ulem") &&
3214 LaTeXPackages::isAvailable("xcolor");
3215 // No need to test for luatex, since luatex comes in
3216 // two flavours (dvi and pdf), like latex, and those
3217 // are detected by pdflatex.
3218 if (pdflatex || xetex) {
3220 preamble.registerAutomaticallyLoadedPackage("ulem");
3221 preamble.registerAutomaticallyLoadedPackage("xcolor");
3222 preamble.registerAutomaticallyLoadedPackage("pdfcolmk");
3226 preamble.registerAutomaticallyLoadedPackage("dvipost");
3227 } else if (xcolorulem) {
3228 preamble.registerAutomaticallyLoadedPackage("ulem");
3229 preamble.registerAutomaticallyLoadedPackage("xcolor");
3234 else if (t.cs() == "textipa") {
3235 context.check_layout(os);
3236 begin_inset(os, "IPA\n");
3237 bool merging_hyphens_allowed = context.merging_hyphens_allowed;
3238 context.merging_hyphens_allowed = false;
3239 parse_text_in_inset(p, os, FLAG_ITEM, outer, context);
3240 context.merging_hyphens_allowed = merging_hyphens_allowed;
3242 preamble.registerAutomaticallyLoadedPackage("tipa");
3243 preamble.registerAutomaticallyLoadedPackage("tipx");
3246 else if (t.cs() == "texttoptiebar" || t.cs() == "textbottomtiebar") {
3247 context.check_layout(os);
3248 begin_inset(os, "IPADeco " + t.cs().substr(4) + "\n");
3249 os << "status open\n";
3250 parse_text_in_inset(p, os, FLAG_ITEM, outer, context);
3255 else if (t.cs() == "textvertline") {
3256 // FIXME: This is not correct, \textvertline is higher than |
3262 else if (t.cs() == "tone" ) {
3263 context.check_layout(os);
3264 // register the tone package
3265 preamble.registerAutomaticallyLoadedPackage("tone");
3266 string content = trimSpaceAndEol(p.verbatim_item());
3267 string command = t.asInput() + "{" + content + "}";
3268 // some tones can be detected by unicodesymbols, some need special code
3269 if (is_known(content, known_tones)) {
3270 os << "\\IPAChar " << command << "\n";
3273 // try to see whether the string is in unicodesymbols
3277 docstring s = encodings.fromLaTeXCommand(from_utf8(command),
3278 Encodings::TEXT_CMD | Encodings::MATH_CMD,
3279 termination, rem, &req);
3283 output_ert_inset(os, to_utf8(rem), context);
3284 for (set<string>::const_iterator it = req.begin();
3285 it != req.end(); ++it)
3286 preamble.registerAutomaticallyLoadedPackage(*it);
3288 // we did not find a non-ert version
3289 output_ert_inset(os, command, context);
3292 else if (t.cs() == "phantom" || t.cs() == "hphantom" ||
3293 t.cs() == "vphantom") {
3294 context.check_layout(os);
3295 if (t.cs() == "phantom")
3296 begin_inset(os, "Phantom Phantom\n");
3297 if (t.cs() == "hphantom")
3298 begin_inset(os, "Phantom HPhantom\n");
3299 if (t.cs() == "vphantom")
3300 begin_inset(os, "Phantom VPhantom\n");
3301 os << "status open\n";
3302 parse_text_in_inset(p, os, FLAG_ITEM, outer, context,
// \href{target}{name} (hyperref): converted to LyX's "href" command inset.
// The URI scheme prefix ("mailto:", "file:") is split off into the inset's
// "type" field; plain http targets keep the scheme in the target string.
3307 else if (t.cs() == "href") {
3308 context.check_layout(os);
3309 string target = convert_command_inset_arg(p.verbatim_item());
3310 string name = convert_command_inset_arg(p.verbatim_item());
3312 size_t i = target.find(':');
3313 if (i != string::npos) {
3314 type = target.substr(0, i + 1);
3315 if (type == "mailto:" || type == "file:")
3316 target = target.substr(i + 1);
3317 // handle the case that name is equal to target, except for "http://"
3318 else if (target.substr(i + 3) == name && type == "http:")
3321 begin_command_inset(os, "href", "href");
3323 os << "name \"" << name << "\"\n";
3324 os << "target \"" << target << "\"\n";
// Only mailto: and file: schemes are stored explicitly as the inset type.
3325 if (type == "mailto:" || type == "file:")
3326 os << "type \"" << type << "\"\n";
3328 skip_spaces_braces(p);
3331 else if (t.cs() == "lyxline") {
3332 // swallow size argument (it is not used anyway)
3334 if (!context.atParagraphStart()) {
3335 // so our line is in the middle of a paragraph
3336 // we need to add a new line, lest this line
3337 // follow the other content on that line and
3338 // run off the side of the page
3339 // FIXME: This may create an empty paragraph,
3340 // but without that it would not be
3341 // possible to set noindent below.
3342 // Fortunately LaTeX does not care
3343 // about the empty paragraph.
3344 context.new_paragraph(os);
3346 if (preamble.indentParagraphs()) {
3347 // we need to unindent, lest the line be too long
3348 context.add_par_extra_stuff("\\noindent\n");
3350 context.check_layout(os);
3351 begin_command_inset(os, "line", "rule");
3352 os << "offset \"0.5ex\"\n"
3353 "width \"100line%\"\n"
// \rule[offset]{width}{thickness}: mapped to LyX's horizontal line inset
// ("line" inset, "rule" variant). All lengths go through translate_len()
// to convert LaTeX length syntax into LyX's representation.
3358 else if (t.cs() == "rule") {
3359 string const offset = (p.hasOpt() ? p.getArg('[', ']') : string());
3360 string const width = p.getArg('{', '}');
3361 string const thickness = p.getArg('{', '}');
3362 context.check_layout(os);
3363 begin_command_inset(os, "line", "rule");
// The optional vertical offset is only written when present.
3364 if (!offset.empty())
3365 os << "offset \"" << translate_len(offset) << "\"\n";
3366 os << "width \"" << translate_len(width) << "\"\n"
3367 "height \"" << translate_len(thickness) << "\"\n";
3371 // handle refstyle first to catch \eqref which can also occur
3372 // without refstyle. Only recognize these commands if
3373 // refstyle.sty was found in the preamble (otherwise \eqref
3374 // and user defined ref commands could be misdetected).
3375 else if ((where = is_known(t.cs(), known_refstyle_commands)) &&
3376 preamble.refstyle()) {
3377 context.check_layout(os);
3378 begin_command_inset(os, "ref", "formatted");
3379 os << "reference \"";
3380 os << known_refstyle_prefixes[where - known_refstyle_commands]
3382 os << convert_command_inset_arg(p.verbatim_item())
3385 preamble.registerAutomaticallyLoadedPackage("refstyle");
3388 // if refstyle is used, we must not convert \prettyref to a
3389 // formatted reference, since that would result in a refstyle command.
3390 else if ((where = is_known(t.cs(), known_ref_commands)) &&
3391 (t.cs() != "prettyref" || !preamble.refstyle())) {
3392 string const opt = p.getOpt();
3394 context.check_layout(os);
3395 begin_command_inset(os, "ref",
3396 known_coded_ref_commands[where - known_ref_commands]);
3397 os << "reference \""
3398 << convert_command_inset_arg(p.verbatim_item())
3401 if (t.cs() == "vref" || t.cs() == "vpageref")
3402 preamble.registerAutomaticallyLoadedPackage("varioref");
3403 else if (t.cs() == "prettyref")
3404 preamble.registerAutomaticallyLoadedPackage("prettyref");
3406 // LyX does not yet support optional arguments of ref commands
3407 output_ert_inset(os, t.asInput() + '[' + opt + "]{" +
3408 p.verbatim_item() + '}', context);
3412 else if (use_natbib &&
3413 is_known(t.cs(), known_natbib_commands) &&
3414 ((t.cs() != "citefullauthor" &&
3415 t.cs() != "citeyear" &&
3416 t.cs() != "citeyearpar") ||
3417 p.next_token().asInput() != "*")) {
3418 context.check_layout(os);
3419 string command = t.cs();
3420 if (p.next_token().asInput() == "*") {
3424 if (command == "citefullauthor")
3425 // alternative name for "\\citeauthor*"
3426 command = "citeauthor*";
3428 // text before the citation
3430 // text after the citation
3432 get_cite_arguments(p, true, before, after);
3434 if (command == "cite") {
3435 // \cite without optional argument means
3436 // \citet, \cite with at least one optional
3437 // argument means \citep.
3438 if (before.empty() && after.empty())
3443 if (before.empty() && after == "[]")
3444 // avoid \citet[]{a}
3446 else if (before == "[]" && after == "[]") {
3447 // avoid \citet[][]{a}
3451 // remove the brackets around after and before
3452 if (!after.empty()) {
3454 after.erase(after.length() - 1, 1);
3455 after = convert_command_inset_arg(after);
3457 if (!before.empty()) {
3459 before.erase(before.length() - 1, 1);
3460 before = convert_command_inset_arg(before);
3462 begin_command_inset(os, "citation", command);
3463 os << "after " << '"' << after << '"' << "\n";
3464 os << "before " << '"' << before << '"' << "\n";
3466 << convert_command_inset_arg(p.verbatim_item())
3469 // Need to set the cite engine if natbib is loaded by
3470 // the document class directly
3471 if (preamble.citeEngine() == "basic")
3472 preamble.citeEngine("natbib");
3475 else if (use_jurabib &&
3476 is_known(t.cs(), known_jurabib_commands) &&
3477 (t.cs() == "cite" || p.next_token().asInput() != "*")) {
3478 context.check_layout(os);
3479 string command = t.cs();
3480 if (p.next_token().asInput() == "*") {
3484 char argumentOrder = '\0';
3485 vector<string> const options =
3486 preamble.getPackageOptions("jurabib");
3487 if (find(options.begin(), options.end(),
3488 "natbiborder") != options.end())
3489 argumentOrder = 'n';
3490 else if (find(options.begin(), options.end(),
3491 "jurabiborder") != options.end())
3492 argumentOrder = 'j';
3494 // text before the citation
3496 // text after the citation
3498 get_cite_arguments(p, argumentOrder != 'j', before, after);
3500 string const citation = p.verbatim_item();
3501 if (!before.empty() && argumentOrder == '\0') {
3502 cerr << "Warning: Assuming argument order "
3503 "of jurabib version 0.6 for\n'"
3504 << command << before << after << '{'
3505 << citation << "}'.\n"
3506 "Add 'jurabiborder' to the jurabib "
3507 "package options if you used an\n"
3508 "earlier jurabib version." << endl;
3510 if (!after.empty()) {
3512 after.erase(after.length() - 1, 1);
3514 if (!before.empty()) {
3516 before.erase(before.length() - 1, 1);
3518 begin_command_inset(os, "citation", command);
3519 os << "after " << '"' << after << '"' << "\n";
3520 os << "before " << '"' << before << '"' << "\n";
3521 os << "key " << '"' << citation << '"' << "\n";
3523 // Need to set the cite engine if jurabib is loaded by
3524 // the document class directly
3525 if (preamble.citeEngine() == "basic")
3526 preamble.citeEngine("jurabib");
3529 else if (t.cs() == "cite"
3530 || t.cs() == "nocite") {
3531 context.check_layout(os);
3532 string after = convert_command_inset_arg(p.getArg('[', ']'));
3533 string key = convert_command_inset_arg(p.verbatim_item());
3534 // store the case that it is "\nocite{*}" to use it later for
3537 begin_command_inset(os, "citation", t.cs());
3538 os << "after " << '"' << after << '"' << "\n";
3539 os << "key " << '"' << key << '"' << "\n";
3541 } else if (t.cs() == "nocite")
3545 else if (t.cs() == "index" ||
3546 (t.cs() == "sindex" && preamble.use_indices() == "true")) {
3547 context.check_layout(os);
3548 string const arg = (t.cs() == "sindex" && p.hasOpt()) ?
3549 p.getArg('[', ']') : "";
3550 string const kind = arg.empty() ? "idx" : arg;
3551 begin_inset(os, "Index ");
3552 os << kind << "\nstatus collapsed\n";
3553 parse_text_in_inset(p, os, FLAG_ITEM, false, context, "Index");
3556 preamble.registerAutomaticallyLoadedPackage("splitidx");
3559 else if (t.cs() == "nomenclature") {
3560 context.check_layout(os);
3561 begin_command_inset(os, "nomenclature", "nomenclature");
3562 string prefix = convert_command_inset_arg(p.getArg('[', ']'));
3563 if (!prefix.empty())
3564 os << "prefix " << '"' << prefix << '"' << "\n";
3565 os << "symbol " << '"'
3566 << convert_command_inset_arg(p.verbatim_item());
3567 os << "\"\ndescription \""
3568 << convert_command_inset_arg(p.verbatim_item())
3571 preamble.registerAutomaticallyLoadedPackage("nomencl");
3574 else if (t.cs() == "label") {
3575 context.check_layout(os);
3576 begin_command_inset(os, "label", "label");
3578 << convert_command_inset_arg(p.verbatim_item())
3583 else if (t.cs() == "printindex" || t.cs() == "printsubindex") {
3584 context.check_layout(os);
3585 string commandname = t.cs();
3587 if (p.next_token().asInput() == "*") {
3592 begin_command_inset(os, "index_print", commandname);
3593 string const indexname = p.getArg('[', ']');
3595 if (indexname.empty())
3596 os << "type \"idx\"\n";
3598 os << "type \"" << indexname << "\"\n";
3601 skip_spaces_braces(p);
3602 preamble.registerAutomaticallyLoadedPackage("makeidx");
3603 if (preamble.use_indices() == "true")
3604 preamble.registerAutomaticallyLoadedPackage("splitidx");
3607 else if (t.cs() == "printnomenclature") {
3609 string width_type = "";
3610 context.check_layout(os);
3611 begin_command_inset(os, "nomencl_print", "printnomenclature");
3612 // case of a custom width
3614 width = p.getArg('[', ']');
3615 width = translate_len(width);
3616 width_type = "custom";
3618 // case of no custom width
3619 // the case of no custom width but the width set
3620 // via \settowidth{\nomlabelwidth}{***} cannot be supported
3621 // because the user could have set anything, not only the width
3622 // of the longest label (which would be width_type = "auto")
3623 string label = convert_command_inset_arg(p.getArg('{', '}'));
3624 if (label.empty() && width_type.empty())
3625 width_type = "none";
3626 os << "set_width \"" << width_type << "\"\n";
3627 if (width_type == "custom")
3628 os << "width \"" << width << '\"';
3630 skip_spaces_braces(p);
3631 preamble.registerAutomaticallyLoadedPackage("nomencl");
3634 else if ((t.cs() == "textsuperscript" || t.cs() == "textsubscript")) {
3635 context.check_layout(os);
3636 begin_inset(os, "script ");
3637 os << t.cs().substr(4) << '\n';
3638 newinsetlayout = findInsetLayout(context.textclass, t.cs(), true);
3639 parse_text_in_inset(p, os, FLAG_ITEM, false, context, newinsetlayout);
3641 if (t.cs() == "textsubscript")
3642 preamble.registerAutomaticallyLoadedPackage("subscript");
3645 else if ((where = is_known(t.cs(), known_quotes))) {
3646 context.check_layout(os);
3647 begin_inset(os, "Quotes ");
3648 os << known_coded_quotes[where - known_quotes];
3650 // LyX adds {} after the quote, so we have to eat
3651 // spaces here if there are any before a possible
3653 eat_whitespace(p, os, context, false);
3657 else if ((where = is_known(t.cs(), known_sizes)) &&
3658 context.new_layout_allowed) {
3659 context.check_layout(os);
3660 TeXFont const oldFont = context.font;
3661 context.font.size = known_coded_sizes[where - known_sizes];
3662 output_font_change(os, oldFont, context.font);
3663 eat_whitespace(p, os, context, false);
3666 else if ((where = is_known(t.cs(), known_font_families)) &&
3667 context.new_layout_allowed) {
3668 context.check_layout(os);
3669 TeXFont const oldFont = context.font;
3670 context.font.family =
3671 known_coded_font_families[where - known_font_families];
3672 output_font_change(os, oldFont, context.font);
3673 eat_whitespace(p, os, context, false);
3676 else if ((where = is_known(t.cs(), known_font_series)) &&
3677 context.new_layout_allowed) {
3678 context.check_layout(os);
3679 TeXFont const oldFont = context.font;
3680 context.font.series =
3681 known_coded_font_series[where - known_font_series];
3682 output_font_change(os, oldFont, context.font);
3683 eat_whitespace(p, os, context, false);
3686 else if ((where = is_known(t.cs(), known_font_shapes)) &&
3687 context.new_layout_allowed) {
3688 context.check_layout(os);
3689 TeXFont const oldFont = context.font;
3690 context.font.shape =
3691 known_coded_font_shapes[where - known_font_shapes];
3692 output_font_change(os, oldFont, context.font);
3693 eat_whitespace(p, os, context, false);
3695 else if ((where = is_known(t.cs(), known_old_font_families)) &&
3696 context.new_layout_allowed) {
3697 context.check_layout(os);
3698 TeXFont const oldFont = context.font;
3699 context.font.init();
3700 context.font.size = oldFont.size;
3701 context.font.family =
3702 known_coded_font_families[where - known_old_font_families];
3703 output_font_change(os, oldFont, context.font);
3704 eat_whitespace(p, os, context, false);
3707 else if ((where = is_known(t.cs(), known_old_font_series)) &&
3708 context.new_layout_allowed) {
3709 context.check_layout(os);
3710 TeXFont const oldFont = context.font;
3711 context.font.init();
3712 context.font.size = oldFont.size;
3713 context.font.series =
3714 known_coded_font_series[where - known_old_font_series];
3715 output_font_change(os, oldFont, context.font);
3716 eat_whitespace(p, os, context, false);
3719 else if ((where = is_known(t.cs(), known_old_font_shapes)) &&
3720 context.new_layout_allowed) {
3721 context.check_layout(os);
3722 TeXFont const oldFont = context.font;
3723 context.font.init();
3724 context.font.size = oldFont.size;
3725 context.font.shape =
3726 known_coded_font_shapes[where - known_old_font_shapes];
3727 output_font_change(os, oldFont, context.font);
3728 eat_whitespace(p, os, context, false);
// \selectlanguage{lang} (babel): a persistent language switch. The babel
// name is translated to the LyX language name and stored in the current
// font context, then emitted as a \lang change.
3731 else if (t.cs() == "selectlanguage") {
3732 context.check_layout(os);
3733 // save the language for the case that a
3734 // \foreignlanguage is used
3735 context.font.language = babel2lyx(p.verbatim_item());
3736 os << "\n\\lang " << context.font.language << "\n";
// \foreignlanguage{lang}{text} (babel): a scoped language change; the
// argument text is parsed with the language attribute switched, restoring
// the saved context.font.language afterwards.
3739 else if (t.cs() == "foreignlanguage") {
3740 string const lang = babel2lyx(p.verbatim_item());
3741 parse_text_attributes(p, os, FLAG_ITEM, outer,
3743 context.font.language, lang);
3746 else if (prefixIs(t.cs(), "text") && preamble.usePolyglossia()
3747 && is_known(t.cs().substr(4), preamble.polyglossia_languages)) {
3748 // scheme is \textLANGUAGE{text} where LANGUAGE is in polyglossia_languages[]
3750 // We have to output the whole command if it has an option
3751 // because LyX doesn't support this yet, see bug #8214,
3752 // only if there is a single option specifying a variant, we can handle it.
3754 string langopts = p.getOpt();
3755 // check if the option contains a variant, if yes, extract it
3756 string::size_type pos_var = langopts.find("variant");
3757 string::size_type i = langopts.find(',');
3758 string::size_type k = langopts.find('=', pos_var);
3759 if (pos_var != string::npos && i == string::npos) {
3761 variant = langopts.substr(k + 1, langopts.length() - k - 2);
3762 lang = preamble.polyglossia2lyx(variant);
3763 parse_text_attributes(p, os, FLAG_ITEM, outer,
3765 context.font.language, lang);
3767 output_ert_inset(os, t.asInput() + langopts, context);
3769 lang = preamble.polyglossia2lyx(t.cs().substr(4, string::npos));
3770 parse_text_attributes(p, os, FLAG_ITEM, outer,
3772 context.font.language, lang);
3776 else if (t.cs() == "inputencoding") {
3777 // nothing to write here
3778 string const enc = subst(p.verbatim_item(), "\n", " ");
3779 p.setEncoding(enc, Encoding::inputenc);
3782 else if (is_known(t.cs(), known_special_chars) ||
3783 (t.cs() == "protect" &&
3784 p.next_token().cat() == catEscape &&
3785 is_known(p.next_token().cs(), known_special_protect_chars))) {
3786 // LyX sometimes puts a \protect in front, so we have to ignore it
3788 t.cs() == "protect" ? p.get_token().cs() : t.cs(),
3789 known_special_chars);
3790 context.check_layout(os);
3791 os << known_coded_special_chars[where - known_special_chars];
3792 skip_spaces_braces(p);
3795 else if ((t.cs() == "nobreakdash" && p.next_token().asInput() == "-") ||
3796 (t.cs() == "protect" && p.next_token().asInput() == "\\nobreakdash" &&
3797 p.next_next_token().asInput() == "-") ||
3798 (t.cs() == "@" && p.next_token().asInput() == ".")) {
3799 // LyX sometimes puts a \protect in front, so we have to ignore it
3800 if (t.cs() == "protect")
3802 context.check_layout(os);
3803 if (t.cs() == "nobreakdash")
3804 os << "\\SpecialChar nobreakdash\n";
3806 os << "\\SpecialChar endofsentence\n";
3810 else if (t.cs() == "textquotedbl") {
3811 context.check_layout(os);
3816 else if (t.cs() == "_" || t.cs() == "&" || t.cs() == "#"
3817 || t.cs() == "$" || t.cs() == "{" || t.cs() == "}"
3818 || t.cs() == "%" || t.cs() == "-") {
3819 context.check_layout(os);
3821 os << "\\SpecialChar softhyphen\n";
3826 else if (t.cs() == "char") {
3827 context.check_layout(os);
3828 if (p.next_token().character() == '`') {
3830 if (p.next_token().cs() == "\"") {
3835 output_ert_inset(os, "\\char`", context);
3838 output_ert_inset(os, "\\char", context);
// \verb<delim>...<delim>: LyX has no native inset for \verb, so the whole
// command is preserved verbatim as ERT. Catcodes must be switched before
// reading the delimiter so special characters are not interpreted.
3842 else if (t.cs() == "verb") {
3843 context.check_layout(os);
3844 // set catcodes to verbatim early, just in case.
3845 p.setCatcodes(VERBATIM_CATCODES);
3846 string delim = p.get_token().asInput();
3847 Parser::Arg arg = p.verbatimStuff(delim);
// NOTE(review): arg.first presumably flags whether the closing delimiter
// was found (the branch condition is outside this excerpt) — confirm.
3849 output_ert_inset(os, "\\verb" + delim
3850 + arg.second + delim, context);
3852 cerr << "invalid \\verb command. Skipping" << endl;
3855 // Problem: \= creates a tabstop inside the tabbing environment
3856 // and else an accent. In the latter case we really would want
3857 // \={o} instead of \= o.
3858 else if (t.cs() == "=" && (flags & FLAG_TABBING))
3859 output_ert_inset(os, t.asInput(), context);
3861 else if (t.cs() == "\\") {
3862 context.check_layout(os);
3864 output_ert_inset(os, "\\\\" + p.getOpt(), context);
3865 else if (p.next_token().asInput() == "*") {
3867 // getOpt() eats the following space if there
3868 // is no optional argument, but that is OK
3869 // here since it has no effect in the output.
3870 output_ert_inset(os, "\\\\*" + p.getOpt(), context);
3873 begin_inset(os, "Newline newline");
3878 else if (t.cs() == "newline" ||
3879 (t.cs() == "linebreak" && !p.hasOpt())) {
3880 context.check_layout(os);
3881 begin_inset(os, "Newline ");
3884 skip_spaces_braces(p);
3887 else if (t.cs() == "input" || t.cs() == "include"
3888 || t.cs() == "verbatiminput") {
3889 string name = t.cs();
3890 if (t.cs() == "verbatiminput"
3891 && p.next_token().asInput() == "*")
3892 name += p.get_token().asInput();
3893 context.check_layout(os);
3894 string filename(normalize_filename(p.getArg('{', '}')));
3895 string const path = getMasterFilePath(true);
3896 // We want to preserve relative / absolute filenames,
3897 // therefore path is only used for testing
3898 if ((t.cs() == "include" || t.cs() == "input") &&
3899 !makeAbsPath(filename, path).exists()) {
3900 // The file extension is probably missing.
3901 // Now try to find it out.
3902 string const tex_name =
3903 find_file(filename, path,
3904 known_tex_extensions);
3905 if (!tex_name.empty())
3906 filename = tex_name;
3908 bool external = false;
3910 if (makeAbsPath(filename, path).exists()) {
3911 string const abstexname =
3912 makeAbsPath(filename, path).absFileName();
3913 string const absfigname =
3914 changeExtension(abstexname, ".fig");
3915 fix_child_filename(filename);
3916 string const lyxname = changeExtension(filename,
3917 roundtripMode() ? ".lyx.lyx" : ".lyx");
3918 string const abslyxname = makeAbsPath(
3919 lyxname, getParentFilePath(false)).absFileName();
3921 if (!skipChildren())
3922 external = FileName(absfigname).exists();
3923 if (t.cs() == "input" && !skipChildren()) {
3924 string const ext = getExtension(abstexname);
3926 // Combined PS/LaTeX:
3927 // x.eps, x.pstex_t (old xfig)
3928 // x.pstex, x.pstex_t (new xfig, e.g. 3.2.5)
3929 FileName const absepsname(
3930 changeExtension(abstexname, ".eps"));
3931 FileName const abspstexname(
3932 changeExtension(abstexname, ".pstex"));
3933 bool const xfigeps =
3934 (absepsname.exists() ||
3935 abspstexname.exists()) &&
3938 // Combined PDF/LaTeX:
3939 // x.pdf, x.pdftex_t (old xfig)
3940 // x.pdf, x.pdf_t (new xfig, e.g. 3.2.5)
3941 FileName const abspdfname(
3942 changeExtension(abstexname, ".pdf"));
3943 bool const xfigpdf =
3944 abspdfname.exists() &&
3945 (ext == "pdftex_t" || ext == "pdf_t");
3949 // Combined PS/PDF/LaTeX:
3950 // x_pspdftex.eps, x_pspdftex.pdf, x.pspdftex
3951 string const absbase2(
3952 removeExtension(abstexname) + "_pspdftex");
3953 FileName const abseps2name(
3954 addExtension(absbase2, ".eps"));
3955 FileName const abspdf2name(
3956 addExtension(absbase2, ".pdf"));
3957 bool const xfigboth =
3958 abspdf2name.exists() &&
3959 abseps2name.exists() && ext == "pspdftex";
3961 xfig = xfigpdf || xfigeps || xfigboth;
3962 external = external && xfig;
3965 outname = changeExtension(filename, ".fig");
3966 FileName abssrc(changeExtension(abstexname, ".fig"));
3967 copy_file(abssrc, outname);
3969 // Don't try to convert, the result
3970 // would be full of ERT.
3972 FileName abssrc(abstexname);
3973 copy_file(abssrc, outname);
3974 } else if (t.cs() != "verbatiminput" &&
3976 tex2lyx(abstexname, FileName(abslyxname),
3979 // no need to call copy_file
3980 // tex2lyx creates the file
3983 FileName abssrc(abstexname);
3984 copy_file(abssrc, outname);
3987 cerr << "Warning: Could not find included file '"
3988 << filename << "'." << endl;
3992 begin_inset(os, "External\n");
3993 os << "\ttemplate XFig\n"
3994 << "\tfilename " << outname << '\n';
3995 registerExternalTemplatePackages("XFig");
3997 begin_command_inset(os, "include", name);
3998 outname = subst(outname, "\"", "\\\"");
3999 os << "preview false\n"
4000 "filename \"" << outname << "\"\n";
4001 if (t.cs() == "verbatiminput")
4002 preamble.registerAutomaticallyLoadedPackage("verbatim");
4007 else if (t.cs() == "bibliographystyle") {
4008 // store new bibliographystyle
4009 bibliographystyle = p.verbatim_item();
4010 // If any other command than \bibliography, \addcontentsline
4011 // and \nocite{*} follows, we need to output the style
4012 // (because it might be used by that command).
4013 // Otherwise, it will automatically be output by LyX.
4016 for (Token t2 = p.get_token(); p.good(); t2 = p.get_token()) {
4017 if (t2.cat() == catBegin)
4019 if (t2.cat() != catEscape)
4021 if (t2.cs() == "nocite") {
4022 if (p.getArg('{', '}') == "*")
4024 } else if (t2.cs() == "bibliography")
4026 else if (t2.cs() == "phantomsection") {
4030 else if (t2.cs() == "addcontentsline") {
4031 // get the 3 arguments of \addcontentsline
4034 contentslineContent = p.getArg('{', '}');
4035 // if the last argument is not \refname we must output
4036 if (contentslineContent == "\\refname")
4043 output_ert_inset(os,
4044 "\\bibliographystyle{" + bibliographystyle + '}',
4049 else if (t.cs() == "phantomsection") {
4050 // we only support this if it occurs between
4051 // \bibliographystyle and \bibliography
4052 if (bibliographystyle.empty())
4053 output_ert_inset(os, "\\phantomsection", context);
4056 else if (t.cs() == "addcontentsline") {
4057 context.check_layout(os);
4058 // get the 3 arguments of \addcontentsline
4059 string const one = p.getArg('{', '}');
4060 string const two = p.getArg('{', '}');
4061 string const three = p.getArg('{', '}');
4062 // only if it is a \refname, we support if for the bibtex inset
4063 if (contentslineContent != "\\refname") {
4064 output_ert_inset(os,
4065 "\\addcontentsline{" + one + "}{" + two + "}{"+ three + '}',
4070 else if (t.cs() == "bibliography") {
4071 context.check_layout(os);
4073 begin_command_inset(os, "bibtex", "bibtex");
4074 if (!btprint.empty()) {
4075 os << "btprint " << '"' << "btPrintAll" << '"' << "\n";
4076 // clear the string because the next BibTeX inset can be without the
4077 // \nocite{*} option
4080 os << "bibfiles " << '"' << p.verbatim_item() << '"' << "\n";
4081 // Do we have addcontentsline?
4082 if (contentslineContent == "\\refname") {
4083 BibOpts = "bibtotoc";
4084 // clear string because next BibTeX inset can be without addcontentsline
4085 contentslineContent.clear();
4087 // Do we have a bibliographystyle set?
4088 if (!bibliographystyle.empty()) {
4089 if (BibOpts.empty())
4090 BibOpts = bibliographystyle;
4092 BibOpts = BibOpts + ',' + bibliographystyle;
4093 // clear it because each bibtex entry has its style
4094 // and we need an empty string to handle \phantomsection
4095 bibliographystyle.clear();
4097 os << "options " << '"' << BibOpts << '"' << "\n";
4101 else if (t.cs() == "parbox") {
4102 // Test whether this is an outer box of a shaded box
4104 // swallow arguments
4105 while (p.hasOpt()) {
4107 p.skip_spaces(true);
4110 p.skip_spaces(true);
4112 if (p.next_token().cat() == catBegin)
4114 p.skip_spaces(true);
4115 Token to = p.get_token();
4116 bool shaded = false;
4117 if (to.asInput() == "\\begin") {
4118 p.skip_spaces(true);
4119 if (p.getArg('{', '}') == "shaded")
4124 parse_outer_box(p, os, FLAG_ITEM, outer,
4125 context, "parbox", "shaded");
4127 parse_box(p, os, 0, FLAG_ITEM, outer, context,
4131 else if (t.cs() == "fbox" || t.cs() == "mbox" ||
4132 t.cs() == "ovalbox" || t.cs() == "Ovalbox" ||
4133 t.cs() == "shadowbox" || t.cs() == "doublebox")
4134 parse_outer_box(p, os, FLAG_ITEM, outer, context, t.cs(), "");
4136 else if (t.cs() == "framebox") {
4137 if (p.next_token().character() == '(') {
4138 //the syntax is: \framebox(x,y)[position]{content}
4139 string arg = t.asInput();
4140 arg += p.getFullParentheseArg();
4141 arg += p.getFullOpt();
4142 eat_whitespace(p, os, context, false);
4143 output_ert_inset(os, arg + '{', context);
4144 parse_text(p, os, FLAG_ITEM, outer, context);
4145 output_ert_inset(os, "}", context);
4147 //the syntax is: \framebox[width][position]{content}
4148 string special = p.getFullOpt();
4149 special += p.getOpt();
4150 parse_outer_box(p, os, FLAG_ITEM, outer,
4151 context, t.cs(), special);
4155 //\makebox() is part of the picture environment and different from \makebox{}
4156 //\makebox{} will be parsed by parse_box
4157 else if (t.cs() == "makebox") {
4158 if (p.next_token().character() == '(') {
4159 //the syntax is: \makebox(x,y)[position]{content}
4160 string arg = t.asInput();
4161 arg += p.getFullParentheseArg();
4162 arg += p.getFullOpt();
4163 eat_whitespace(p, os, context, false);
4164 output_ert_inset(os, arg + '{', context);
4165 parse_text(p, os, FLAG_ITEM, outer, context);
4166 output_ert_inset(os, "}", context);
4168 //the syntax is: \makebox[width][position]{content}
4169 parse_box(p, os, 0, FLAG_ITEM, outer, context,
4173 else if (t.cs() == "smallskip" ||
4174 t.cs() == "medskip" ||
4175 t.cs() == "bigskip" ||
4176 t.cs() == "vfill") {
4177 context.check_layout(os);
4178 begin_inset(os, "VSpace ");
4181 skip_spaces_braces(p);
4184 else if ((where = is_known(t.cs(), known_spaces))) {
4185 context.check_layout(os);
4186 begin_inset(os, "space ");
4187 os << '\\' << known_coded_spaces[where - known_spaces]
4190 // LaTeX swallows whitespace after all spaces except
4191 // "\\,". We have to do that here, too, because LyX
4192 // adds "{}" which would make the spaces significant.
4194 eat_whitespace(p, os, context, false);
4195 // LyX adds "{}" after all spaces except "\\ " and
4196 // "\\,", so we have to remove "{}".
4197 // "\\,{}" is equivalent to "\\," in LaTeX, so we
4198 // remove the braces after "\\,", too.
4203 else if (t.cs() == "newpage" ||
4204 (t.cs() == "pagebreak" && !p.hasOpt()) ||
4205 t.cs() == "clearpage" ||
4206 t.cs() == "cleardoublepage") {
4207 context.check_layout(os);
4208 begin_inset(os, "Newpage ");
4211 skip_spaces_braces(p);
4214 else if (t.cs() == "DeclareRobustCommand" ||
4215 t.cs() == "DeclareRobustCommandx" ||
4216 t.cs() == "newcommand" ||
4217 t.cs() == "newcommandx" ||
4218 t.cs() == "providecommand" ||
4219 t.cs() == "providecommandx" ||
4220 t.cs() == "renewcommand" ||
4221 t.cs() == "renewcommandx") {
4222 // DeclareRobustCommand, DeclareRobustCommandx,
4223 // providecommand and providecommandx could be handled
4224 // by parse_command(), but we need to call
4225 // add_known_command() here.
4226 string name = t.asInput();
4227 if (p.next_token().asInput() == "*") {
4228 // Starred form. Eat '*'
4232 string const command = p.verbatim_item();
4233 string const opt1 = p.getFullOpt();
4234 string const opt2 = p.getFullOpt();
4235 add_known_command(command, opt1, !opt2.empty());
4236 string const ert = name + '{' + command + '}' +
4238 '{' + p.verbatim_item() + '}';
4240 if (t.cs() == "DeclareRobustCommand" ||
4241 t.cs() == "DeclareRobustCommandx" ||
4242 t.cs() == "providecommand" ||
4243 t.cs() == "providecommandx" ||
4244 name[name.length()-1] == '*')
4245 output_ert_inset(os, ert, context);
4247 context.check_layout(os);
4248 begin_inset(os, "FormulaMacro");
// \let\name\command: echoed as ERT (LyX cannot represent it), but if the
// aliased command is known, the new name is registered in known_commands
// so that later uses of \name parse their arguments like \command.
4254 else if (t.cs() == "let" && p.next_token().asInput() != "*") {
4255 // let could be handled by parse_command(),
4256 // but we need to call add_known_command() here.
4257 string ert = t.asInput();
// The \let target may be braced ({\name}) or a bare control sequence.
4260 if (p.next_token().cat() == catBegin) {
4261 name = p.verbatim_item();
4262 ert += '{' + name + '}';
4264 name = p.verbatim_item();
// Same for the command being aliased.
4269 if (p.next_token().cat() == catBegin) {
4270 command = p.verbatim_item();
4271 ert += '{' + command + '}';
4273 command = p.verbatim_item();
4276 // If command is known, make name known too, to parse
4277 // its arguments correctly. For this reason we also
4278 // have commands in syntax.default that are hardcoded.
4279 CommandMap::iterator it = known_commands.find(command);
4280 if (it != known_commands.end())
4281 known_commands[t.asInput()] = it->second;
4282 output_ert_inset(os, ert, context);
4285 else if (t.cs() == "hspace" || t.cs() == "vspace") {
4288 string name = t.asInput();
4289 string const length = p.verbatim_item();
4292 bool valid = splitLatexLength(length, valstring, unit);
4293 bool known_hspace = false;
4294 bool known_vspace = false;
4295 bool known_unit = false;
4298 istringstream iss(valstring);
4301 if (t.cs()[0] == 'h') {
4302 if (unit == "\\fill") {
4307 known_hspace = true;
4310 if (unit == "\\smallskipamount") {
4312 known_vspace = true;
4313 } else if (unit == "\\medskipamount") {
4315 known_vspace = true;
4316 } else if (unit == "\\bigskipamount") {
4318 known_vspace = true;
4319 } else if (unit == "\\fill") {
4321 known_vspace = true;
4325 if (!known_hspace && !known_vspace) {
4326 switch (unitFromString(unit)) {
4342 //unitFromString(unit) fails for relative units like Length::PCW
4343 // therefore handle them separately
4344 if (unit == "\\paperwidth" || unit == "\\columnwidth"
4345 || unit == "\\textwidth" || unit == "\\linewidth"
4346 || unit == "\\textheight" || unit == "\\paperheight")
4354 // check for glue lengths
4355 bool is_gluelength = false;
4356 string gluelength = length;
4357 string::size_type i = length.find(" minus");
4358 if (i == string::npos) {
4359 i = length.find(" plus");
4360 if (i != string::npos)
4361 is_gluelength = true;
4363 is_gluelength = true;
4364 // if yes transform "9xx minus 8yy plus 7zz"
4366 if (is_gluelength) {
4367 i = gluelength.find(" minus");
4368 if (i != string::npos)
4369 gluelength.replace(i, 7, "-");
4370 i = gluelength.find(" plus");
4371 if (i != string::npos)
4372 gluelength.replace(i, 6, "+");
4375 if (t.cs()[0] == 'h' && (known_unit || known_hspace || is_gluelength)) {
4376 // Literal horizontal length or known variable
4377 context.check_layout(os);
4378 begin_inset(os, "space ");
4386 if (known_unit && !known_hspace)
4387 os << "\n\\length " << translate_len(length);
4389 os << "\n\\length " << gluelength;
4391 } else if (known_unit || known_vspace || is_gluelength) {
4392 // Literal vertical length or known variable
4393 context.check_layout(os);
4394 begin_inset(os, "VSpace ");
4397 if (known_unit && !known_vspace)
4398 os << translate_len(length);
4405 // LyX can't handle other length variables in Inset VSpace/space
4410 output_ert_inset(os, name + '{' + unit + '}', context);
4411 else if (value == -1.0)
4412 output_ert_inset(os, name + "{-" + unit + '}', context);
4414 output_ert_inset(os, name + '{' + valstring + unit + '}', context);
4416 output_ert_inset(os, name + '{' + length + '}', context);
4420 // The single '=' is meant here.
4421 else if ((newinsetlayout = findInsetLayout(context.textclass, starredname, true))) {
4425 context.check_layout(os);
4426 docstring const name = newinsetlayout->name();
4427 bool const caption = name.find(from_ascii("Caption:")) == 0;
4429 begin_inset(os, "Caption ");
4430 os << to_utf8(name.substr(8)) << '\n';
4432 begin_inset(os, "Flex ");
4433 os << to_utf8(name) << '\n'
4434 << "status collapsed\n";
4436 if (newinsetlayout->isPassThru()) {
4437 // set catcodes to verbatim early, just in case.
4438 p.setCatcodes(VERBATIM_CATCODES);
4439 string delim = p.get_token().asInput();
4441 cerr << "Warning: bad delimiter for command " << t.asInput() << endl;
4442 //FIXME: handle error condition
4443 string const arg = p.verbatimStuff("}").second;
4444 Context newcontext(true, context.textclass);
4445 if (newinsetlayout->forcePlainLayout())
4446 newcontext.layout = &context.textclass.plainLayout();
4447 output_ert(os, arg, newcontext);
4449 parse_text_in_inset(p, os, FLAG_ITEM, false, context, newinsetlayout);
4455 else if (t.cs() == "includepdf") {
4457 string const arg = p.getArg('[', ']');
4458 map<string, string> opts;
4459 vector<string> keys;
4460 split_map(arg, opts, keys);
4461 string name = normalize_filename(p.verbatim_item());
4462 string const path = getMasterFilePath(true);
4463 // We want to preserve relative / absolute filenames,
4464 // therefore path is only used for testing
4465 if (!makeAbsPath(name, path).exists()) {
4466 // The file extension is probably missing.
4467 // Now try to find it out.
4468 char const * const pdfpages_format[] = {"pdf", 0};
4469 string const pdftex_name =
4470 find_file(name, path, pdfpages_format);
4471 if (!pdftex_name.empty()) {
4476 FileName const absname = makeAbsPath(name, path);
4477 if (absname.exists())
4479 fix_child_filename(name);
4480 copy_file(absname, name);
4482 cerr << "Warning: Could not find file '"
4483 << name << "'." << endl;
4485 context.check_layout(os);
4486 begin_inset(os, "External\n\ttemplate ");
4487 os << "PDFPages\n\tfilename "
4489 // parse the options
4490 if (opts.find("pages") != opts.end())
4491 os << "\textra LaTeX \"pages="
4492 << opts["pages"] << "\"\n";
4493 if (opts.find("angle") != opts.end())
4494 os << "\trotateAngle "
4495 << opts["angle"] << '\n';
4496 if (opts.find("origin") != opts.end()) {
4498 string const opt = opts["origin"];
4499 if (opt == "tl") ss << "topleft";
4500 if (opt == "bl") ss << "bottomleft";
4501 if (opt == "Bl") ss << "baselineleft";
4502 if (opt == "c") ss << "center";
4503 if (opt == "tc") ss << "topcenter";
4504 if (opt == "bc") ss << "bottomcenter";
4505 if (opt == "Bc") ss << "baselinecenter";
4506 if (opt == "tr") ss << "topright";
4507 if (opt == "br") ss << "bottomright";
4508 if (opt == "Br") ss << "baselineright";
4509 if (!ss.str().empty())
4510 os << "\trotateOrigin " << ss.str() << '\n';
4512 cerr << "Warning: Ignoring unknown includegraphics origin argument '" << opt << "'\n";
4514 if (opts.find("width") != opts.end())
4516 << translate_len(opts["width"]) << '\n';
4517 if (opts.find("height") != opts.end())
4519 << translate_len(opts["height"]) << '\n';
4520 if (opts.find("keepaspectratio") != opts.end())
4521 os << "\tkeepAspectRatio\n";
4523 context.check_layout(os);
4524 registerExternalTemplatePackages("PDFPages");
4527 else if (t.cs() == "loadgame") {
4529 string name = normalize_filename(p.verbatim_item());
4530 string const path = getMasterFilePath(true);
4531 // We want to preserve relative / absolute filenames,
4532 // therefore path is only used for testing
4533 if (!makeAbsPath(name, path).exists()) {
4534 // The file extension is probably missing.
4535 // Now try to find it out.
4536 char const * const lyxskak_format[] = {"fen", 0};
4537 string const lyxskak_name =
4538 find_file(name, path, lyxskak_format);
4539 if (!lyxskak_name.empty())
4540 name = lyxskak_name;
4542 FileName const absname = makeAbsPath(name, path);
4543 if (absname.exists())
4545 fix_child_filename(name);
4546 copy_file(absname, name);
4548 cerr << "Warning: Could not find file '"
4549 << name << "'." << endl;
4550 context.check_layout(os);
4551 begin_inset(os, "External\n\ttemplate ");
4552 os << "ChessDiagram\n\tfilename "
4555 context.check_layout(os);
4556 // after a \loadgame follows a \showboard
4557 if (p.get_token().asInput() == "showboard")
4559 registerExternalTemplatePackages("ChessDiagram");
4563 // try to see whether the string is in unicodesymbols
4564 // Only use text mode commands, since we are in text mode here,
4565 // and math commands may be invalid (bug 6797)
4566 string name = t.asInput();
4567 // handle the dingbats, cyrillic and greek
4568 if (name == "\\ding" || name == "\\textcyr" ||
4569 (name == "\\textgreek" && !preamble.usePolyglossia()))
4570 name = name + '{' + p.getArg('{', '}') + '}';
4571 // handle the ifsym characters
4572 else if (name == "\\textifsymbol") {
4573 string const optif = p.getFullOpt();
4574 string const argif = p.getArg('{', '}');
4575 name = name + optif + '{' + argif + '}';
4577 // handle the \ascii characters
4578 // the case of \ascii within braces, as LyX outputs it, is already
4579 // handled for t.cat() == catBegin
4580 else if (name == "\\ascii") {
4581 // the code is "\asci\xxx"
4582 name = "{" + name + p.get_token().asInput() + "}";
4585 // handle some TIPA special characters
4586 else if (preamble.isPackageUsed("tipa")) {
4587 if (name == "\\textglobfall") {
4590 } else if (name == "\\s") {
4591 // fromLaTeXCommand() does not yet
4592 // recognize tipa short cuts
4593 name = "\\textsyllabic";
4594 } else if (name == "\\=" &&
4595 p.next_token().asInput() == "*") {
4596 // fromLaTeXCommand() does not yet
4597 // recognize tipa short cuts
4600 } else if (name == "\\textdoublevertline") {
4601 // FIXME: This is not correct,
4602 // \textvertline is higher than \textbardbl
4603 name = "\\textbardbl";
4605 } else if (name == "\\!" ) {
4606 if (p.next_token().asInput() == "b") {
4607 p.get_token(); // eat 'b'
4610 } else if (p.next_token().asInput() == "d") {
4614 } else if (p.next_token().asInput() == "g") {
4618 } else if (p.next_token().asInput() == "G") {
4620 name = "\\texthtscg";
4622 } else if (p.next_token().asInput() == "j") {
4624 name = "\\texthtbardotlessj";
4626 } else if (p.next_token().asInput() == "o") {
4628 name = "\\textbullseye";
4631 } else if (name == "\\*" ) {
4632 if (p.next_token().asInput() == "k") {
4634 name = "\\textturnk";
4636 } else if (p.next_token().asInput() == "r") {
4637 p.get_token(); // eat 'b'
4638 name = "\\textturnr";
4640 } else if (p.next_token().asInput() == "t") {
4642 name = "\\textturnt";
4644 } else if (p.next_token().asInput() == "w") {
4646 name = "\\textturnw";
4651 if ((name.size() == 2 &&
4652 contains("\"'.=^`bcdHkrtuv~", name[1]) &&
4653 p.next_token().asInput() != "*") ||
4654 is_known(name.substr(1), known_tipa_marks)) {
4655 // name is a command that corresponds to a
4656 // combining character in unicodesymbols.
4657 // Append the argument, fromLaTeXCommand()
4658 // will either convert it to a single
4659 // character or a combining sequence.
4660 name += '{' + p.verbatim_item() + '}';
4662 // now get the character from unicodesymbols
4666 docstring s = encodings.fromLaTeXCommand(from_utf8(name),
4667 Encodings::TEXT_CMD, termination, rem, &req);
4669 context.check_layout(os);
4672 output_ert_inset(os, to_utf8(rem), context);
4674 skip_spaces_braces(p);
4675 for (set<string>::const_iterator it = req.begin(); it != req.end(); ++it)
4676 preamble.registerAutomaticallyLoadedPackage(*it);
4678 //cerr << "#: " << t << " mode: " << mode << endl;
4679 // heuristic: read up to next non-nested space
4681 string s = t.asInput();
4682 string z = p.verbatim_item();
4683 while (p.good() && z != " " && !z.empty()) {
4684 //cerr << "read: " << z << endl;
4686 z = p.verbatim_item();
4688 cerr << "found ERT: " << s << endl;
4689 output_ert_inset(os, s + ' ', context);
4692 if (t.asInput() == name &&
4693 p.next_token().asInput() == "*") {
4694 // Starred commands like \vspace*{}
4695 p.get_token(); // Eat '*'
4698 if (!parse_command(name, p, os, outer, context))
4699 output_ert_inset(os, name, context);
4703 if (flags & FLAG_LEAVE) {
4704 flags &= ~FLAG_LEAVE;
4711 string guessLanguage(Parser & p, string const & lang)
4713 typedef std::map<std::string, size_t> LangMap;
4714 // map from language names to number of characters
4717 for (char const * const * i = supported_CJK_languages; *i; i++)
4718 used[string(*i)] = 0;
4721 Token const t = p.get_token();
4722 // comments are not counted for any language
4723 if (t.cat() == catComment)
4725 // commands are not counted as well, but we need to detect
4726 // \begin{CJK} and switch encoding if needed
4727 if (t.cat() == catEscape) {
4728 if (t.cs() == "inputencoding") {
4729 string const enc = subst(p.verbatim_item(), "\n", " ");
4730 p.setEncoding(enc, Encoding::inputenc);
4733 if (t.cs() != "begin")
4736 // Non-CJK content is counted for lang.
4737 // We do not care about the real language here:
4738 // If we have more non-CJK contents than CJK contents,
4739 // we simply use the language that was specified as
4740 // babel main language.
4741 used[lang] += t.asInput().length();
4744 // Now we are starting an environment
4746 string const name = p.getArg('{', '}');
4747 if (name != "CJK") {
4751 // It is a CJK environment
4753 /* name = */ p.getArg('{', '}');
4754 string const encoding = p.getArg('{', '}');
4755 /* mapping = */ p.getArg('{', '}');
4756 string const encoding_old = p.getEncoding();
4757 char const * const * const where =
4758 is_known(encoding, supported_CJK_encodings);
4760 p.setEncoding(encoding, Encoding::CJK);
4762 p.setEncoding("UTF-8");
4763 string const text = p.ertEnvironment("CJK");
4764 p.setEncoding(encoding_old);
4767 // ignore contents in unknown CJK encoding
4770 // the language of the text
4772 supported_CJK_languages[where - supported_CJK_encodings];
4773 used[cjk] += text.length();
4775 LangMap::const_iterator use = used.begin();
4776 for (LangMap::const_iterator it = used.begin(); it != used.end(); ++it) {
4777 if (it->second > use->second)