2 * \file tex2lyx/text.cpp
3 * This file is part of LyX, the document processor.
4 * Licence details can be found in the file COPYING.
7 * \author Jean-Marc Lasgouttes
10 * Full author contact details are available in file CREDITS.
20 #include "FloatList.h"
24 #include "support/lstrings.h"
25 #include "support/convert.h"
26 #include "support/filetools.h"
37 using support::addExtension;
38 using support::changeExtension;
39 using support::FileName;
40 using support::makeAbsPath;
41 using support::makeRelPath;
43 using support::suffixIs;
44 using support::contains;
// Parses the contents of an inset. A fresh Context is created that keeps
// only the caller's textclass and font (layout state is NOT inherited,
// since an inset starts its own layout nesting), then the main text parser
// runs and any layout the nested parse left open is closed.
48 void parse_text_in_inset(Parser & p, ostream & os, unsigned flags, bool outer,
49 Context const & context)
51 Context newcontext(true, context.textclass);
52 newcontext.font = context.font;
53 parse_text(p, os, flags, outer, newcontext);
// Close a possibly still-open \begin_layout so the inset ends cleanly.
54 newcontext.check_end_layout(os);
60 /// parses a paragraph snippet, useful for example for \\emph{...}
61 void parse_text_snippet(Parser & p, ostream & os, unsigned flags, bool outer,
// Work on a copy of the caller's context so the snippet cannot disturb
// the caller's layout nesting.
64 Context newcontext(context);
65 // Don't inherit the extra stuff
66 newcontext.extra_stuff.clear();
67 parse_text(p, os, flags, outer, newcontext);
68 // Make sure that we don't create invalid .lyx files
// Propagate the layout bookkeeping flags back so the caller knows
// whether a \begin_layout / \end_layout is still pending.
69 context.need_layout = newcontext.need_layout;
70 context.need_end_layout = newcontext.need_end_layout;
75 * Thin wrapper around parse_text_snippet() using a string.
77 * We completely ignore \c context.need_layout and \c context.need_end_layout,
78 * because our return value is not used directly (otherwise the stream version
79 * of parse_text_snippet() could be used). That means that the caller needs
80 * to do layout management manually.
81 * This is intended to parse text that does not create any layout changes.
83 string parse_text_snippet(Parser & p, unsigned flags, const bool outer,
// Disable all layout handling on the copied context: the produced string
// is spliced into the output by the caller, which manages layouts itself.
86 Context newcontext(context);
87 newcontext.need_layout = false;
88 newcontext.need_end_layout = false;
89 newcontext.new_layout_allowed = false;
90 // Avoid warning by Context::~Context()
91 newcontext.extra_stuff.clear();
// Delegate to the stream version; the result is collected via 'os'
// (declaration not visible in this chunk — presumably an ostringstream).
93 parse_text_snippet(p, os, flags, outer, newcontext);
// Lookup tables of LaTeX names known to the converter. Each array is a
// 0-terminated list of C strings; several "known_coded_*" arrays run in
// parallel with their "known_*" counterpart (same index = same entry),
// translating the LaTeX name into the .lyx name.
98 char const * const known_latex_commands[] = { "ref", "cite", "label",
99 "index", "printindex", "pageref", "url", "vref", "vpageref", "prettyref",
104 * We can't put these into known_latex_commands because the argument order
105 * is reversed in lyx if there are 2 arguments.
106 * The starred forms are also known.
108 char const * const known_natbib_commands[] = { "cite", "citet", "citep",
109 "citealt", "citealp", "citeauthor", "citeyear", "citeyearpar",
110 "citefullauthor", "Citet", "Citep", "Citealt", "Citealp", "Citeauthor", 0 };
114 * We can't put these into known_latex_commands because the argument order
115 * is reversed in lyx if there are 2 arguments.
116 * No starred form other than "cite*" known.
118 char const * const known_jurabib_commands[] = { "cite", "citet", "citep",
119 "citealt", "citealp", "citeauthor", "citeyear", "citeyearpar",
120 // jurabib commands not (yet) supported by LyX:
122 // "footcite", "footcitet", "footcitep", "footcitealt", "footcitealp",
123 // "footciteauthor", "footciteyear", "footciteyearpar",
124 "citefield", "citetitle", "cite*", 0 };
126 /// LaTeX names for quotes
127 char const * const known_quotes[] = { "dq", "guillemotleft", "flqq", "og",
128 "guillemotright", "frqq", "fg", "glq", "glqq", "textquoteleft", "grq", "grqq",
129 "quotedblbase", "textquotedblleft", "quotesinglbase", "textquoteright", "flq",
130 "guilsinglleft", "frq", "guilsinglright", 0};
132 /// the same as known_quotes with .lyx names
133 char const * const known_coded_quotes[] = { "prd", "ard", "ard", "ard",
134 "ald", "ald", "ald", "gls", "gld", "els", "els", "grd",
135 "gld", "grd", "gls", "ers", "fls",
136 "fls", "frs", "frs", 0};
138 /// LaTeX names for font sizes
139 char const * const known_sizes[] = { "tiny", "scriptsize", "footnotesize",
140 "small", "normalsize", "large", "Large", "LARGE", "huge", "Huge", 0};
142 /// the same as known_sizes with .lyx names
// NOTE(review): this table has one extra leading entry ("default") relative
// to known_sizes — presumably indices are offset by one when translating;
// confirm against the lookup code (not visible in this chunk).
143 char const * const known_coded_sizes[] = { "default", "tiny", "scriptsize", "footnotesize",
144 "small", "normal", "large", "larger", "largest", "huge", "giant", 0};
146 /// LaTeX 2.09 names for font families
147 char const * const known_old_font_families[] = { "rm", "sf", "tt", 0};
149 /// LaTeX names for font families
150 char const * const known_font_families[] = { "rmfamily", "sffamily",
153 /// the same as known_old_font_families and known_font_families with .lyx names
154 char const * const known_coded_font_families[] = { "roman", "sans",
157 /// LaTeX 2.09 names for font series
158 char const * const known_old_font_series[] = { "bf", 0};
160 /// LaTeX names for font series
161 char const * const known_font_series[] = { "bfseries", "mdseries", 0};
163 /// the same as known_old_font_series and known_font_series with .lyx names
164 char const * const known_coded_font_series[] = { "bold", "medium", 0};
166 /// LaTeX 2.09 names for font shapes
167 char const * const known_old_font_shapes[] = { "it", "sl", "sc", 0};
169 /// LaTeX names for font shapes
170 char const * const known_font_shapes[] = { "itshape", "slshape", "scshape",
173 /// the same as known_old_font_shapes and known_font_shapes with .lyx names
174 char const * const known_coded_font_shapes[] = { "italic", "slanted",
175 "smallcaps", "up", 0};
178 * Graphics file extensions known by the dvips driver of the graphics package.
179 * These extensions are used to complete the filename of an included
180 * graphics file if it does not contain an extension.
181 * The order must be the same that latex uses to find a file, because we
182 * will use the first extension that matches.
183 * This is only an approximation for the common cases. If we would want to
184 * do it right in all cases, we would need to know which graphics driver is
185 * used and know the extensions of every driver of the graphics package.
187 char const * const known_dvips_graphics_formats[] = {"eps", "ps", "eps.gz",
188 "ps.gz", "eps.Z", "ps.Z", 0};
191 * Graphics file extensions known by the pdftex driver of the graphics package.
192 * \sa known_dvips_graphics_formats
194 char const * const known_pdftex_graphics_formats[] = {"png", "pdf", "jpg",
198 * Known file extensions for TeX files as used by \\include.
200 char const * const known_tex_extensions[] = {"tex", 0};
202 /// spaces known by InsetSpace
203 char const * const known_spaces[] = { " ", "space", ",", "thinspace", "quad",
204 "qquad", "enspace", "enskip", "negthinspace", 0};
206 /// the same as known_spaces with .lyx names
207 char const * const known_coded_spaces[] = { "space{}", "space{}",
208 "thinspace{}", "thinspace{}", "quad{}", "qquad{}", "enspace{}", "enskip{}",
209 "negthinspace{}", 0};
212 /// splits "x=z, y=b" into a map
213 map<string, string> split_map(string const & s)
215 map<string, string> res;
// 'v' is presumably the comma-split pieces of 's' (the split call is not
// visible in this chunk). Each piece is split at its first '='.
218 for (size_t i = 0; i < v.size(); ++i) {
219 size_t const pos = v[i].find('=');
// If there is no '=', pos is npos: the whole piece becomes the key and
// value starts at npos+1 == 0, i.e. the whole piece again — TODO confirm
// callers never pass entries without '='.
220 string const index = v[i].substr(0, pos);
221 string const value = v[i].substr(pos + 1, string::npos);
// Keys and values are stored whitespace-trimmed.
222 res[trim(index)] = trim(value);
229 * Split a LaTeX length into value and unit.
230 * The latter can be a real unit like "pt", or a latex length variable
231 * like "\textwidth". The unit may contain additional stuff like glue
232 * lengths, but we don't care, because such lengths are ERT anyway.
233 * \returns true if \p value and \p unit are valid.
235 bool splitLatexLength(string const & len, string & value, string & unit)
// Index of the first character that cannot belong to the numeric factor.
239 const string::size_type i = len.find_first_not_of(" -+0123456789.,");
240 //'4,5' is a valid LaTeX length number. Change it to '4.5'
241 string const length = subst(len, ',', '.');
// No unit at all (the whole string is numeric) — handled here.
242 if (i == string::npos)
245 if (len[0] == '\\') {
246 // We had something like \textwidth without a factor
// Everything before the unit is the numeric value.
252 value = trim(string(length, 0, i));
256 // 'cM' is a valid LaTeX length unit. Change it to 'cm'
// Length variables like \textwidth are case sensitive, so only
// lowercase the unit when it is not a macro.
257 if (contains(len, '\\'))
258 unit = trim(string(len, i));
260 unit = support::ascii_lowercase(trim(string(len, i)));
265 /// A simple function to translate a latex length to something lyx can
266 /// understand. Not perfect, but rather best-effort.
267 bool translate_len(string const & length, string & valstring, string & unit)
268 {
269 if (!splitLatexLength(length, valstring, unit))
271 // LyX uses percent values
// Convert the numeric factor to a percentage (e.g. 0.5\textwidth -> 50).
273 istringstream iss(valstring);
278 string const percentval = oss.str();
// Plain units (pt, cm, ...) need no translation; only \-macros below do.
280 if (unit.empty() || unit[0] != '\\')
// 'endlen' keeps any glue suffix after the macro (e.g. " plus 2pt").
282 string::size_type const i = unit.find(' ');
283 string const endlen = (i == string::npos) ? string() : string(unit, i);
// Map each known LaTeX length macro to the corresponding LyX percent unit.
284 if (unit == "\\textwidth") {
285 valstring = percentval;
286 unit = "text%" + endlen;
287 } else if (unit == "\\columnwidth") {
288 valstring = percentval;
289 unit = "col%" + endlen;
290 } else if (unit == "\\paperwidth") {
291 valstring = percentval;
292 unit = "page%" + endlen;
293 } else if (unit == "\\linewidth") {
294 valstring = percentval;
295 unit = "line%" + endlen;
296 } else if (unit == "\\paperheight") {
297 valstring = percentval;
298 unit = "pheight%" + endlen;
299 } else if (unit == "\\textheight") {
300 valstring = percentval;
301 unit = "theight%" + endlen;
// Convenience overload: translate a LaTeX length and return it as a single
// "value+unit" string; on failure the input is returned unchanged.
309 string translate_len(string const & length)
313 if (translate_len(length, value, unit))
315 // If the input is invalid, return what we have.
323 * Translates a LaTeX length into \p value, \p unit and
324 * \p special parts suitable for a box inset.
325 * The difference from translate_len() is that a box inset knows about
326 * some special "units" that are stored in \p special.
328 void translate_box_len(string const & length, string & value, string & unit, string & special)
330 if (translate_len(length, value, unit)) {
// These macros are box-relative lengths; LyX stores them in the
// "special" field (without the backslash) instead of as a unit.
331 if (unit == "\\height" || unit == "\\depth" ||
332 unit == "\\totalheight" || unit == "\\width") {
333 special = unit.substr(1);
334 // The unit is not used, but LyX requires a dummy setting
347 * Find a file with basename \p name in path \p path and an extension
// from the 0-terminated \p extensions list; returns the first existing
// candidate (extensions are tried in order, matching LaTeX's search order).
350 string find_file(string const & name, string const & path,
351 char const * const * extensions)
353 // FIXME UNICODE encoding of name and path may be wrong (makeAbsPath
355 for (char const * const * what = extensions; *what; ++what) {
356 string const trial = addExtension(name, *what);
357 if (makeAbsPath(trial, path).exists())
// Emits the LyX file-format opener for an inset of the given name.
364 void begin_inset(ostream & os, string const & name)
366 os << "\n\\begin_inset " << name;
// Emits the LyX file-format closer for the current inset.
370 void end_inset(ostream & os)
372 os << "\n\\end_inset\n\n";
// Consumes an immediately following empty brace pair "{}" from the token
// stream, if present; otherwise leaves the parser untouched.
376 void skip_braces(Parser & p)
378 if (p.next_token().cat() != catBegin)
381 if (p.next_token().cat() == catEnd) {
// Writes the raw LaTeX string \p s into the output as a collapsed ERT
// inset, escaping backslashes and translating newlines into new
// paragraphs inside the inset.
389 void handle_ert(ostream & os, string const & s, Context & context)
391 // We must have a valid layout before outputting the ERT inset.
392 context.check_layout(os);
393 Context newcontext(true, context.textclass);
394 begin_inset(os, "ERT");
395 os << "\nstatus collapsed\n";
396 newcontext.check_layout(os);
397 for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
// Backslash must be written as the \backslash token in .lyx files.
399 os << "\n\\backslash\n";
400 else if (*it == '\n') {
401 newcontext.new_paragraph(os);
402 newcontext.check_layout(os);
406 newcontext.check_end_layout(os);
// Like handle_ert(), but for a LaTeX comment: the text is emitted in a
// collapsed ERT inset and a paragraph break is forced afterwards so the
// comment stays the last thing on its line.
411 void handle_comment(ostream & os, string const & s, Context & context)
413 // TODO: Handle this better
414 Context newcontext(true, context.textclass);
415 begin_inset(os, "ERT");
416 os << "\nstatus collapsed\n";
417 newcontext.check_layout(os);
418 for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
420 os << "\n\\backslash\n";
424 // make sure that our comment is the last thing on the line
425 newcontext.new_paragraph(os);
426 newcontext.check_layout(os);
427 newcontext.check_end_layout(os);
// Predicate functor for std::find_if: matches a layout whose LaTeX name
// equals the name given at construction time.
432 class isLayout : public std::unary_function<LayoutPtr, bool> {
434 isLayout(string const name) : name_(name) {}
435 bool operator()(LayoutPtr const & ptr) const {
436 return ptr->latexname() == name_;
// Looks up the layout with LaTeX name \p name in \p textclass.
// Returns an empty LayoutPtr when no such layout exists.
443 LayoutPtr findLayout(TextClass const & textclass,
446 TextClass::const_iterator beg = textclass.begin();
447 TextClass::const_iterator end = textclass.end();
449 TextClass::const_iterator
450 it = std::find_if(beg, end, isLayout(name));
452 return (it == end) ? LayoutPtr() : *it;
456 void eat_whitespace(Parser &, ostream &, Context &, bool);
// Outputs a LaTeX command (e.g. \section{...}) as a LyX command layout:
// closes the parent layout, opens a nested context for the new layout,
// converts an optional argument into an OptArg inset, parses the
// mandatory argument as the layout's content, then restores the parent.
459 void output_command_layout(ostream & os, Parser & p, bool outer,
460 Context & parent_context,
463 parent_context.check_end_layout(os);
464 Context context(true, parent_context.textclass, newlayout,
465 parent_context.layout, parent_context.font);
466 if (parent_context.deeper_paragraph) {
467 // We are beginning a nested environment after a
468 // deeper paragraph inside the outer list environment.
469 // Therefore we don't need to output a "begin deeper".
470 context.need_end_deeper = true;
472 context.check_deeper(os);
473 context.check_layout(os);
// Handle the layout's optional argument(s), emitted as an OptArg inset.
474 if (context.layout->optionalargs > 0) {
475 eat_whitespace(p, os, context, false);
476 if (p.next_token().character() == '[') {
477 p.get_token(); // eat '['
478 begin_inset(os, "OptArg\n");
479 os << "status collapsed\n\n";
480 parse_text_in_inset(p, os, FLAG_BRACK_LAST, outer, context);
482 eat_whitespace(p, os, context, false);
// The mandatory argument becomes the body of the layout.
485 parse_text(p, os, FLAG_ITEM, outer, context);
486 context.check_end_layout(os);
487 if (parent_context.deeper_paragraph) {
488 // We must suppress the "end deeper" because we
489 // suppressed the "begin deeper" above.
490 context.need_end_deeper = false;
492 context.check_end_deeper(os);
493 // We don't need really a new paragraph, but
494 // we must make sure that the next item gets a \begin_layout.
495 parent_context.new_paragraph(os);
500 * Output a space if necessary.
501 * This function gets called for every whitespace token.
503 * We have three cases here:
504 * 1. A space must be suppressed. Example: The lyxcode case below
505 * 2. A space may be suppressed. Example: Spaces before "\par"
506 * 3. A space must not be suppressed. Example: A space between two words
508 * We currently handle only 1. and 3 and from 2. only the case of
509 * spaces before newlines as a side effect.
511 * 2. could be used to suppress as many spaces as possible. This has two effects:
512 * - Reimporting LyX generated LaTeX files changes almost no whitespace
513 * - Superflous whitespace from non LyX generated LaTeX files is removed.
514 * The drawback is that the logic inside the function becomes
515 * complicated, and that is the reason why it is not implemented.
517 void check_space(Parser const & p, ostream & os, Context & context)
519 Token const next = p.next_token();
520 Token const curr = p.curr_token();
521 // A space before a single newline and vice versa must be ignored
522 // LyX emits a newline before \end{lyxcode}.
523 // This newline must be ignored,
524 // otherwise LyX will add an additional protected space.
525 if (next.cat() == catSpace ||
526 next.cat() == catNewline ||
527 (next.cs() == "end" && context.layout->free_spacing && curr.cat() == catNewline)) {
// Otherwise the space is significant: ensure a layout exists and emit it.
530 context.check_layout(os);
536 * Parse all arguments of \p command
// according to the ArgumentType list: "regular" arguments are recursively
// converted to LyX (the command token itself becomes ERT), while verbatim
// arguments are accumulated and flushed as one ERT chunk at the end.
538 void parse_arguments(string const & command,
539 vector<ArgumentType> const & template_arguments,
540 Parser & p, ostream & os, bool outer, Context & context)
// 'ert' accumulates raw LaTeX text that must be emitted verbatim.
542 string ert = command;
543 size_t no_arguments = template_arguments.size();
544 for (size_t i = 0; i < no_arguments; ++i) {
545 switch (template_arguments[i]) {
547 // This argument contains regular LaTeX
548 handle_ert(os, ert + '{', context);
549 eat_whitespace(p, os, context, false);
550 parse_text(p, os, FLAG_ITEM, outer, context);
554 // This argument may contain special characters
555 ert += '{' + p.verbatim_item() + '}';
// Flush whatever verbatim text is still pending.
562 handle_ert(os, ert, context);
567 * Check whether \p command is a known command. If yes,
568 * handle the command with all arguments.
569 * \return true if the command was parsed, false otherwise.
571 bool parse_command(string const & command, Parser & p, ostream & os,
572 bool outer, Context & context)
574 if (known_commands.find(command) != known_commands.end()) {
// Known command: hand off to parse_arguments() with its argument template.
575 parse_arguments(command, known_commands[command], p, os,
583 /// Parses a minipage or parbox
// Reads the optional position/height/inner-position arguments and the
// mandatory width. If any length uses a LaTeX length variable that LyX
// cannot represent, the whole construct is re-emitted as ERT; otherwise
// a "Box Frameless" inset is written.
584 void parse_box(Parser & p, ostream & os, unsigned flags, bool outer,
585 Context & parent_context, bool use_parbox)
589 // We need to set the height to the LaTeX default of 1\\totalheight
590 // for the case when no height argument is given
591 string height_value = "1";
592 string height_unit = "in";
593 string height_special = "totalheight";
// Up to three optional [..] arguments: position, height, inner position.
595 if (p.next_token().asInput() == "[") {
596 position = p.getArg('[', ']');
597 if (position != "t" && position != "c" && position != "b") {
599 cerr << "invalid position for minipage/parbox" << endl;
601 if (p.next_token().asInput() == "[") {
602 latex_height = p.getArg('[', ']');
603 translate_box_len(latex_height, height_value, height_unit, height_special);
605 if (p.next_token().asInput() == "[") {
606 inner_pos = p.getArg('[', ']');
607 if (inner_pos != "c" && inner_pos != "t" &&
608 inner_pos != "b" && inner_pos != "s") {
609 inner_pos = position;
610 cerr << "invalid inner_pos for minipage/parbox"
618 string const latex_width = p.verbatim_item();
619 translate_len(latex_width, width_value, width_unit);
620 if (contains(width_unit, '\\') || contains(height_unit, '\\')) {
621 // LyX can't handle length variables
// ERT fallback: reconstruct the original \begin{minipage}... source.
626 ss << "\\begin{minipage}";
627 if (!position.empty())
628 ss << '[' << position << ']';
629 if (!latex_height.empty())
630 ss << '[' << latex_height << ']';
631 if (!inner_pos.empty())
632 ss << '[' << inner_pos << ']';
633 ss << "{" << latex_width << "}";
636 handle_ert(os, ss.str(), parent_context);
637 parent_context.new_paragraph(os);
638 parse_text_in_inset(p, os, flags, outer, parent_context);
640 handle_ert(os, "}", parent_context);
642 handle_ert(os, "\\end{minipage}", parent_context);
644 // LyX does not like empty positions, so we have
645 // to set them to the LaTeX default values here.
646 if (position.empty())
648 if (inner_pos.empty())
649 inner_pos = position;
// Native path: emit a frameless Box inset with the translated lengths.
650 parent_context.check_layout(os);
651 begin_inset(os, "Box Frameless\n");
652 os << "position \"" << position << "\"\n";
653 os << "hor_pos \"c\"\n";
654 os << "has_inner_box 1\n";
655 os << "inner_pos \"" << inner_pos << "\"\n";
656 os << "use_parbox " << use_parbox << "\n";
657 os << "width \"" << width_value << width_unit << "\"\n";
658 os << "special \"none\"\n";
659 os << "height \"" << height_value << height_unit << "\"\n";
660 os << "height_special \"" << height_special << "\"\n";
661 os << "status open\n\n";
662 parse_text_in_inset(p, os, flags, outer, parent_context);
664 #ifdef PRESERVE_LAYOUT
665 // lyx puts a % after the end of the minipage
666 if (p.next_token().cat() == catNewline && p.next_token().cs().size() > 1) {
668 //handle_comment(os, "%dummy", parent_context);
671 parent_context.new_paragraph(os);
673 else if (p.next_token().cat() == catSpace || p.next_token().cat() == catNewline) {
674 //handle_comment(os, "%dummy", parent_context);
677 // We add a protected space if something real follows
678 if (p.good() && p.next_token().cat() != catComment) {
679 os << "\\InsetSpace ~\n";
687 /// parse an unknown environment
// The environment is passed through as ERT (\begin{name} ... \end{name}),
// with its content still parsed so nested known constructs convert.
688 void parse_unknown_environment(Parser & p, string const & name, ostream & os,
689 unsigned flags, bool outer,
690 Context & parent_context)
692 if (name == "tabbing")
693 // We need to remember that we have to handle '\=' specially
694 flags |= FLAG_TABBING;
696 // We need to translate font changes and paragraphs inside the
697 // environment to ERT if we have a non standard font.
698 // Otherwise things like
699 // \large\begin{foo}\huge bar\end{foo}
701 bool const specialfont =
702 (parent_context.font != parent_context.normalfont);
// Temporarily forbid new layouts while inside the unknown environment;
// the previous value is restored at the end.
703 bool const new_layout_allowed = parent_context.new_layout_allowed;
705 parent_context.new_layout_allowed = false;
706 handle_ert(os, "\\begin{" + name + "}", parent_context);
707 parse_text_snippet(p, os, flags, outer, parent_context);
708 handle_ert(os, "\\end{" + name + "}", parent_context);
710 parent_context.new_layout_allowed = new_layout_allowed;
// Dispatches a \begin{...} environment to the appropriate converter:
// math, tables, floats, boxes, notes, alignment, list/bib layouts,
// appendix, known-argument environments, and finally the ERT fallback.
714 void parse_environment(Parser & p, ostream & os, bool outer,
715 Context & parent_context)
718 string const name = p.getArg('{', '}');
719 const bool is_starred = suffixIs(name, '*');
720 string const unstarred_name = rtrim(name, "*");
// Track the environment stack (popped at the end of this function).
721 active_environments.push_back(name);
// --- Math environments become a Formula inset with the raw LaTeX kept.
723 if (is_math_env(name)) {
724 parent_context.check_layout(os);
725 begin_inset(os, "Formula ");
726 os << "\\begin{" << name << "}";
727 parse_math(p, os, FLAG_END, MATH_MODE);
728 os << "\\end{" << name << "}";
// --- Tables.
732 else if (name == "tabular" || name == "longtable") {
733 eat_whitespace(p, os, parent_context, false);
734 parent_context.check_layout(os);
735 begin_inset(os, "Tabular ");
736 handle_tabular(p, os, name == "longtable", parent_context);
// --- Floats: any environment registered in the textclass's float list.
741 else if (parent_context.textclass.floats().typeExist(unstarred_name)) {
742 eat_whitespace(p, os, parent_context, false);
743 parent_context.check_layout(os);
744 begin_inset(os, "Float " + unstarred_name + "\n");
745 if (p.next_token().asInput() == "[") {
746 os << "placement " << p.getArg('[', ']') << '\n';
// A starred float (figure*, table*) maps to the "wide" flag.
748 os << "wide " << convert<string>(is_starred)
749 << "\nsideways false"
750 << "\nstatus open\n\n";
751 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
753 // We don't need really a new paragraph, but
754 // we must make sure that the next item gets a \begin_layout.
755 parent_context.new_paragraph(os);
// --- Minipage becomes a Box inset (see parse_box()).
759 else if (name == "minipage") {
760 eat_whitespace(p, os, parent_context, false);
761 parse_box(p, os, FLAG_END, outer, parent_context, false);
// --- Note-style environments map to the corresponding Note insets.
765 else if (name == "comment") {
766 eat_whitespace(p, os, parent_context, false);
767 parent_context.check_layout(os);
768 begin_inset(os, "Note Comment\n");
769 os << "status open\n";
770 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
775 else if (name == "lyxgreyedout") {
776 eat_whitespace(p, os, parent_context, false);
777 parent_context.check_layout(os);
778 begin_inset(os, "Note Greyedout\n");
779 os << "status open\n";
780 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
785 else if (name == "framed") {
786 eat_whitespace(p, os, parent_context, false);
787 parent_context.check_layout(os);
788 begin_inset(os, "Note Framed\n");
789 os << "status open\n";
790 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
795 else if (name == "shaded") {
796 eat_whitespace(p, os, parent_context, false);
797 parent_context.check_layout(os);
798 begin_inset(os, "Note Shaded\n");
799 os << "status open\n";
800 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
// If new layouts are forbidden here, everything else is ERT.
805 else if (!parent_context.new_layout_allowed)
806 parse_unknown_environment(p, name, os, FLAG_END, outer,
809 // Alignment settings
810 else if (name == "center" || name == "flushleft" || name == "flushright" ||
811 name == "centering" || name == "raggedright" || name == "raggedleft") {
812 eat_whitespace(p, os, parent_context, false);
813 // We must begin a new paragraph if not already done
814 if (! parent_context.atParagraphStart()) {
815 parent_context.check_end_layout(os);
816 parent_context.new_paragraph(os);
818 if (name == "flushleft" || name == "raggedright")
819 parent_context.add_extra_stuff("\\align left\n");
820 else if (name == "flushright" || name == "raggedleft")
821 parent_context.add_extra_stuff("\\align right\n");
823 parent_context.add_extra_stuff("\\align center\n");
824 parse_text(p, os, FLAG_END, outer, parent_context);
825 // Just in case the environment is empty ..
826 parent_context.extra_stuff.erase();
827 // We must begin a new paragraph to reset the alignment
828 parent_context.new_paragraph(os);
// --- Environments matching a layout of the textclass (lists, quotes, ...).
832 // The single '=' is meant here.
833 else if ((newlayout = findLayout(parent_context.textclass, name)).get() &&
834 newlayout->isEnvironment()) {
835 eat_whitespace(p, os, parent_context, false);
836 Context context(true, parent_context.textclass, newlayout,
837 parent_context.layout, parent_context.font);
838 if (parent_context.deeper_paragraph) {
839 // We are beginning a nested environment after a
840 // deeper paragraph inside the outer list environment.
841 // Therefore we don't need to output a "begin deeper".
842 context.need_end_deeper = true;
844 parent_context.check_end_layout(os);
845 switch (context.layout->latextype) {
846 case LATEX_LIST_ENVIRONMENT:
847 context.extra_stuff = "\\labelwidthstring "
848 + p.verbatim_item() + '\n';
851 case LATEX_BIB_ENVIRONMENT:
852 p.verbatim_item(); // swallow next arg
858 context.check_deeper(os);
859 parse_text(p, os, FLAG_END, outer, context);
860 context.check_end_layout(os);
861 if (parent_context.deeper_paragraph) {
862 // We must suppress the "end deeper" because we
863 // suppressed the "begin deeper" above.
864 context.need_end_deeper = false;
866 context.check_end_deeper(os);
867 parent_context.new_paragraph(os);
// --- \begin{appendix}: emitted as a start-of-appendix marker.
871 else if (name == "appendix") {
872 // This is no good latex style, but it works and is used in some documents...
873 eat_whitespace(p, os, parent_context, false);
874 parent_context.check_end_layout(os);
875 Context context(true, parent_context.textclass, parent_context.layout,
876 parent_context.layout, parent_context.font);
877 context.check_layout(os);
878 os << "\\start_of_appendix\n";
879 parse_text(p, os, FLAG_END, outer, context);
880 context.check_end_layout(os);
// --- Environments with a known argument template (from the syntax file).
884 else if (known_environments.find(name) != known_environments.end()) {
885 vector<ArgumentType> arguments = known_environments[name];
886 // The last "argument" denotes wether we may translate the
887 // environment contents to LyX
888 // The default required if no argument is given makes us
889 // compatible with the reLyXre environment.
890 ArgumentType contents = arguments.empty() ?
893 if (!arguments.empty())
894 arguments.pop_back();
895 // See comment in parse_unknown_environment()
896 bool const specialfont =
897 (parent_context.font != parent_context.normalfont);
898 bool const new_layout_allowed =
899 parent_context.new_layout_allowed;
901 parent_context.new_layout_allowed = false;
902 parse_arguments("\\begin{" + name + "}", arguments, p, os,
903 outer, parent_context);
904 if (contents == verbatim)
905 handle_ert(os, p.verbatimEnvironment(name),
908 parse_text_snippet(p, os, FLAG_END, outer,
910 handle_ert(os, "\\end{" + name + "}", parent_context);
912 parent_context.new_layout_allowed = new_layout_allowed;
// --- Fallback: completely unknown environment, keep as ERT.
916 parse_unknown_environment(p, name, os, FLAG_END, outer,
919 active_environments.pop_back();
923 /// parses a comment and outputs it to \p os.
924 void parse_comment(Parser & p, ostream & os, Token const & t, Context & context)
926 BOOST_ASSERT(t.cat() == catComment);
// Non-empty comment text: emit it as ERT ('%' plus the text).
927 if (!t.cs().empty()) {
928 context.check_layout(os);
929 handle_comment(os, '%' + t.cs(), context);
930 if (p.next_token().cat() == catNewline) {
931 // A newline after a comment line starts a new
933 if (context.new_layout_allowed) {
934 if(!context.atParagraphStart())
935 // Only start a new paragraph if not already
936 // done (we might get called recursively)
937 context.new_paragraph(os);
// New layouts are forbidden here, so the paragraph break
// itself must go through as ERT.
939 handle_ert(os, "\n", context);
940 eat_whitespace(p, os, context, true);
950 * Reads spaces and comments until the first non-space, non-comment token.
951 * New paragraphs (double newlines or \\par) are handled like simple spaces
952 * if \p eatParagraph is true.
953 * Spaces are skipped, but comments are written to \p os.
955 void eat_whitespace(Parser & p, ostream & os, Context & context,
959 Token const & t = p.get_token();
960 if (t.cat() == catComment)
961 parse_comment(p, os, t, context);
// Stop when the token is neither space nor newline, or when a
// paragraph break is hit and we were told not to eat it.
962 else if ((! eatParagraph && p.isParagraph()) ||
963 (t.cat() != catSpace && t.cat() != catNewline)) {
972 * Set a font attribute, parse text and reset the font attribute.
973 * \param attribute Attribute name (e.g. \\family, \\shape etc.)
974 * \param currentvalue Current value of the attribute. Is set to the new
975 * value during parsing.
976 * \param newvalue New value of the attribute
978 void parse_text_attributes(Parser & p, ostream & os, unsigned flags, bool outer,
979 Context & context, string const & attribute,
980 string & currentvalue, string const & newvalue)
982 context.check_layout(os);
// Remember the old value so it can be restored after the snippet.
983 string const oldvalue = currentvalue;
984 currentvalue = newvalue;
985 os << '\n' << attribute << ' ' << newvalue << "\n";
986 parse_text_snippet(p, os, flags, outer, context);
987 context.check_layout(os);
988 os << '\n' << attribute << ' ' << oldvalue << "\n";
989 currentvalue = oldvalue;
993 /// get the arguments of a natbib or jurabib citation command
// Reads up to two optional [..] arguments. With one argument it is the
// "after" text; with two, natbib and jurabib order them differently,
// which \p natbibOrder accounts for by swapping.
994 void get_cite_arguments(Parser & p, bool natbibOrder,
995 string & before, string & after)
997 // We need to distinguish "" and "[]", so we can't use p.getOpt().
999 // text before the citation
1001 // text after the citation
1002 after = p.getFullOpt();
1004 if (!after.empty()) {
1005 before = p.getFullOpt();
1006 if (natbibOrder && !before.empty())
1007 std::swap(before, after);
1012 /// Convert filenames with TeX macros and/or quotes to something LyX
// can understand: strips surrounding quotes and expands the \lyxdot and
// \space macros that LyX uses to encode dots and spaces in filenames.
1014 string const normalize_filename(string const & name)
1016 Parser p(trim(name, "\""));
1019 Token const & t = p.get_token();
1020 if (t.cat() != catEscape)
1022 else if (t.cs() == "lyxdot") {
1023 // This is used by LyX for simple dots in relative
1027 } else if (t.cs() == "space") {
1037 /// Convert \p name from TeX convention (relative to master file) to LyX
1038 /// convention (relative to .lyx file) if it is relative
1039 void fix_relative_filename(string & name)
// Absolute paths need no adjustment.
1041 if (lyx::support::absolutePath(name))
1043 // FIXME UNICODE encoding of name may be wrong (makeAbsPath expects
// Re-anchor: absolute w.r.t. the master file, then relative to the
// output .lyx file's directory.
1045 name = to_utf8(makeRelPath(from_utf8(makeAbsPath(name, getMasterFilePath()).absFilename()),
1046 from_utf8(getParentFilePath())));
1050 /// Parse a NoWeb Scrap section. The initial "<<" is already parsed.
1051 void parse_noweb(Parser & p, ostream & os, Context & context)
1053 // assemble the rest of the keyword
// First loop: read up to the closing ">>" and decide whether this is a
// real scrap definition (">>=" follows).
1057 Token const & t = p.get_token();
1058 if (t.asInput() == ">" && p.next_token().asInput() == ">") {
1061 scrap = (p.good() && p.next_token().asInput() == "=");
1063 name += p.get_token().asInput();
1066 name += t.asInput();
// Bail out if it is not a scrap or the textclass has no Scrap layout.
1069 if (!scrap || !context.new_layout_allowed ||
1070 !context.textclass.hasLayout(from_ascii("Scrap"))) {
1071 cerr << "Warning: Could not interpret '" << name
1072 << "'. Ignoring it." << endl;
1076 // We use new_paragraph instead of check_end_layout because the stuff
1077 // following the noweb chunk needs to start with a \begin_layout.
1078 // This may create a new paragraph even if there was none in the
1079 // noweb file, but the alternative is an invalid LyX file. Since
1080 // noweb code chunks are implemented with a layout style in LyX they
1081 // always must be in an own paragraph.
1082 context.new_paragraph(os)
1083 Context newcontext(true, context.textclass,
1084 context.textclass[from_ascii("Scrap")]);
1085 newcontext.check_layout(os);
// Second loop: copy the chunk body verbatim into the Scrap layout.
1088 Token const & t = p.get_token();
1089 // We abuse the parser a bit, because this is no TeX syntax
1091 if (t.cat() == catEscape)
1092 os << subst(t.asInput(), "\\", "\n\\backslash\n");
1094 os << subst(t.asInput(), "\n", "\n\\newline\n");
1095 // The scrap chunk is ended by an @ at the beginning of a line.
1096 // After the @ the line may contain a comment and/or
1097 // whitespace, but nothing else.
1098 if (t.asInput() == "@" && p.prev_token().cat() == catNewline &&
1099 (p.next_token().cat() == catSpace ||
1100 p.next_token().cat() == catNewline ||
1101 p.next_token().cat() == catComment)) {
1102 while (p.good() && p.next_token().cat() == catSpace)
1103 os << p.get_token().asInput();
1104 if (p.next_token().cat() == catComment)
1105 // The comment includes a final '\n'
1106 os << p.get_token().asInput();
1108 if (p.next_token().cat() == catNewline)
1115 newcontext.check_end_layout(os);
1118 } // anonymous namespace
1121 void parse_text(Parser & p, ostream & os, unsigned flags, bool outer,
1124 LayoutPtr newlayout;
1125 // store the current selectlanguage to be used after \foreignlanguage
1127 // Store the latest bibliographystyle (needed for bibtex inset)
1128 string bibliographystyle;
1129 bool const use_natbib = used_packages.find("natbib") != used_packages.end();
1130 bool const use_jurabib = used_packages.find("jurabib") != used_packages.end();
1132 Token const & t = p.get_token();
1135 cerr << "t: " << t << " flags: " << flags << "\n";
1138 if (flags & FLAG_ITEM) {
1139 if (t.cat() == catSpace)
1142 flags &= ~FLAG_ITEM;
1143 if (t.cat() == catBegin) {
1144 // skip the brace and collect everything to the next matching
1146 flags |= FLAG_BRACE_LAST;
1150 // handle only this single token, leave the loop if done
1151 flags |= FLAG_LEAVE;
1154 if (t.character() == ']' && (flags & FLAG_BRACK_LAST))
1160 if (t.cat() == catMath) {
1161 // we are inside some text mode thingy, so opening new math is allowed
1162 context.check_layout(os);
1163 begin_inset(os, "Formula ");
1164 Token const & n = p.get_token();
1165 if (n.cat() == catMath && outer) {
1166 // TeX's $$...$$ syntax for displayed math
1168 parse_math(p, os, FLAG_SIMPLE, MATH_MODE);
1170 p.get_token(); // skip the second '$' token
1172 // simple $...$ stuff
1175 parse_math(p, os, FLAG_SIMPLE, MATH_MODE);
1181 else if (t.cat() == catSuper || t.cat() == catSub)
1182 cerr << "catcode " << t << " illegal in text mode\n";
1184 // Basic support for english quotes. This should be
1185 // extended to other quotes, but is not so easy (a
1186 // left english quote is the same as a right german
1188 else if (t.asInput() == "`" && p.next_token().asInput() == "`") {
1189 context.check_layout(os);
1190 begin_inset(os, "Quotes ");
1196 else if (t.asInput() == "'" && p.next_token().asInput() == "'") {
1197 context.check_layout(os);
1198 begin_inset(os, "Quotes ");
1205 else if (t.asInput() == ">" && p.next_token().asInput() == ">") {
1206 context.check_layout(os);
1207 begin_inset(os, "Quotes ");
1214 else if (t.asInput() == "<" && p.next_token().asInput() == "<") {
1215 context.check_layout(os);
1216 begin_inset(os, "Quotes ");
1223 else if (t.asInput() == "<"
1224 && p.next_token().asInput() == "<" && noweb_mode) {
1226 parse_noweb(p, os, context);
1229 else if (t.cat() == catSpace || (t.cat() == catNewline && ! p.isParagraph()))
1230 check_space(p, os, context);
1232 else if (t.character() == '[' && noweb_mode &&
1233 p.next_token().character() == '[') {
1234 // These can contain underscores
1236 string const s = p.getFullOpt() + ']';
1237 if (p.next_token().character() == ']')
1240 cerr << "Warning: Inserting missing ']' in '"
1241 << s << "'." << endl;
1242 handle_ert(os, s, context);
1245 else if (t.cat() == catLetter ||
1246 t.cat() == catOther ||
1247 t.cat() == catAlign ||
1248 t.cat() == catParameter) {
1249 // This translates "&" to "\\&" which may be wrong...
1250 context.check_layout(os);
1251 os << t.character();
1254 else if (p.isParagraph()) {
1255 if (context.new_layout_allowed)
1256 context.new_paragraph(os);
1258 handle_ert(os, "\\par ", context);
1259 eat_whitespace(p, os, context, true);
1262 else if (t.cat() == catActive) {
1263 context.check_layout(os);
1264 if (t.character() == '~') {
1265 if (context.layout->free_spacing)
1268 os << "\\InsetSpace ~\n";
1270 os << t.character();
1273 else if (t.cat() == catBegin &&
1274 p.next_token().cat() == catEnd) {
1276 Token const prev = p.prev_token();
1278 if (p.next_token().character() == '`' ||
1279 (prev.character() == '-' &&
1280 p.next_token().character() == '-'))
1281 ; // ignore it in {}`` or -{}-
1283 handle_ert(os, "{}", context);
1287 else if (t.cat() == catBegin) {
1288 context.check_layout(os);
1289 // special handling of font attribute changes
1290 Token const prev = p.prev_token();
1291 Token const next = p.next_token();
1292 TeXFont const oldFont = context.font;
1293 if (next.character() == '[' ||
1294 next.character() == ']' ||
1295 next.character() == '*') {
1297 if (p.next_token().cat() == catEnd) {
1298 os << next.character();
1302 handle_ert(os, "{", context);
1303 parse_text_snippet(p, os,
1306 handle_ert(os, "}", context);
1308 } else if (! context.new_layout_allowed) {
1309 handle_ert(os, "{", context);
1310 parse_text_snippet(p, os, FLAG_BRACE_LAST,
1312 handle_ert(os, "}", context);
1313 } else if (is_known(next.cs(), known_sizes)) {
1314 // next will change the size, so we must
1316 parse_text_snippet(p, os, FLAG_BRACE_LAST,
1318 if (!context.atParagraphStart())
1320 << context.font.size << "\n";
1321 } else if (is_known(next.cs(), known_font_families)) {
1322 // next will change the font family, so we
1323 // must reset it here
1324 parse_text_snippet(p, os, FLAG_BRACE_LAST,
1326 if (!context.atParagraphStart())
1328 << context.font.family << "\n";
1329 } else if (is_known(next.cs(), known_font_series)) {
1330 // next will change the font series, so we
1331 // must reset it here
1332 parse_text_snippet(p, os, FLAG_BRACE_LAST,
1334 if (!context.atParagraphStart())
1336 << context.font.series << "\n";
1337 } else if (is_known(next.cs(), known_font_shapes)) {
1338 // next will change the font shape, so we
1339 // must reset it here
1340 parse_text_snippet(p, os, FLAG_BRACE_LAST,
1342 if (!context.atParagraphStart())
1344 << context.font.shape << "\n";
1345 } else if (is_known(next.cs(), known_old_font_families) ||
1346 is_known(next.cs(), known_old_font_series) ||
1347 is_known(next.cs(), known_old_font_shapes)) {
1348 // next will change the font family, series
1349 // and shape, so we must reset it here
1350 parse_text_snippet(p, os, FLAG_BRACE_LAST,
1352 if (!context.atParagraphStart())
1354 << context.font.family
1356 << context.font.series
1358 << context.font.shape << "\n";
1360 handle_ert(os, "{", context);
1361 parse_text_snippet(p, os, FLAG_BRACE_LAST,
1363 handle_ert(os, "}", context);
1367 else if (t.cat() == catEnd) {
1368 if (flags & FLAG_BRACE_LAST) {
1371 cerr << "stray '}' in text\n";
1372 handle_ert(os, "}", context);
1375 else if (t.cat() == catComment)
1376 parse_comment(p, os, t, context);
1379 // control sequences
1382 else if (t.cs() == "(") {
1383 context.check_layout(os);
1384 begin_inset(os, "Formula");
1386 parse_math(p, os, FLAG_SIMPLE2, MATH_MODE);
1391 else if (t.cs() == "[") {
1392 context.check_layout(os);
1393 begin_inset(os, "Formula");
1395 parse_math(p, os, FLAG_EQUATION, MATH_MODE);
1400 else if (t.cs() == "begin")
1401 parse_environment(p, os, outer, context);
1403 else if (t.cs() == "end") {
1404 if (flags & FLAG_END) {
1405 // eat environment name
1406 string const name = p.getArg('{', '}');
1407 if (name != active_environment())
1408 cerr << "\\end{" + name + "} does not match \\begin{"
1409 + active_environment() + "}\n";
1412 p.error("found 'end' unexpectedly");
1415 else if (t.cs() == "item") {
1418 bool optarg = false;
1419 if (p.next_token().character() == '[') {
1420 p.get_token(); // eat '['
1421 s = parse_text_snippet(p, FLAG_BRACK_LAST,
1426 context.check_layout(os);
1427 if (context.has_item) {
1428 // An item in an unknown list-like environment
1429 // FIXME: Do this in check_layout()!
1430 context.has_item = false;
1432 handle_ert(os, "\\item", context);
1434 handle_ert(os, "\\item ", context);
1437 if (context.layout->labeltype != LABEL_MANUAL) {
1438 // lyx does not support \item[\mybullet]
1439 // in itemize environments
1440 handle_ert(os, "[", context);
1442 handle_ert(os, "]", context);
1443 } else if (!s.empty()) {
1444 // The space is needed to separate the
1445 // item from the rest of the sentence.
1447 eat_whitespace(p, os, context, false);
1452 else if (t.cs() == "bibitem") {
1454 context.check_layout(os);
1457 os << '{' << p.verbatim_item() << '}' << "\n";
1460 else if (t.cs() == "def") {
1461 context.check_layout(os);
1462 eat_whitespace(p, os, context, false);
1463 string name = p.get_token().cs();
1464 eat_whitespace(p, os, context, false);
1470 while (p.next_token().cat() != catBegin) {
1471 if (p.next_token().cat() == catParameter) {
1476 // followed by number?
1477 if (p.next_token().cat() == catOther) {
1478 char c = p.getChar();
1480 // number = current arity + 1?
1481 if (c == arity + '0' + 1)
1486 paramtext += p.get_token().asString();
1488 paramtext += p.get_token().asString();
1493 // only output simple (i.e. compatible) macro as FormulaMacros
1494 string ert = "\\def\\" + name + ' ' + paramtext + '{' + p.verbatim_item() + '}';
1496 context.check_layout(os);
1497 begin_inset(os, "FormulaMacro");
1501 handle_ert(os, ert, context);
1504 else if (t.cs() == "noindent") {
1506 context.add_extra_stuff("\\noindent\n");
1509 else if (t.cs() == "appendix") {
1510 context.add_extra_stuff("\\start_of_appendix\n");
1511 // We need to start a new paragraph. Otherwise the
1512 // appendix in 'bla\appendix\chapter{' would start
1514 context.new_paragraph(os);
1515 // We need to make sure that the paragraph is
1516 // generated even if it is empty. Otherwise the
1517 // appendix in '\par\appendix\par\chapter{' would
1519 context.check_layout(os);
1520 // FIXME: This is a hack to prevent paragraph
1521 // deletion if it is empty. Handle this better!
1523 "%dummy comment inserted by tex2lyx to "
1524 "ensure that this paragraph is not empty",
1526 // Both measures above may generate an additional
1527 // empty paragraph, but that does not hurt, because
1528 // whitespace does not matter here.
1529 eat_whitespace(p, os, context, true);
1532 // Must attempt to parse "Section*" before "Section".
1533 else if ((p.next_token().asInput() == "*") &&
1534 context.new_layout_allowed &&
1535 // The single '=' is meant here.
1536 (newlayout = findLayout(context.textclass,
1537 t.cs() + '*')).get() &&
1538 newlayout->isCommand()) {
1540 output_command_layout(os, p, outer, context, newlayout);
1544 // The single '=' is meant here.
1545 else if (context.new_layout_allowed &&
1546 (newlayout = findLayout(context.textclass, t.cs())).get() &&
1547 newlayout->isCommand()) {
1548 output_command_layout(os, p, outer, context, newlayout);
1552 // Special handling for \caption
1553 // FIXME: remove this when InsetCaption is supported.
1554 else if (context.new_layout_allowed &&
1555 t.cs() == captionlayout->latexname()) {
1556 output_command_layout(os, p, outer, context,
1561 else if (t.cs() == "includegraphics") {
1562 bool const clip = p.next_token().asInput() == "*";
1565 map<string, string> opts = split_map(p.getArg('[', ']'));
1567 opts["clip"] = string();
1568 string name = normalize_filename(p.verbatim_item());
1570 string const path = getMasterFilePath();
1571 // We want to preserve relative / absolute filenames,
1572 // therefore path is only used for testing
1573 // FIXME UNICODE encoding of name and path may be
1574 // wrong (makeAbsPath expects utf8)
1575 if (!makeAbsPath(name, path).exists()) {
1576 // The file extension is probably missing.
1577 // Now try to find it out.
1578 string const dvips_name =
1579 find_file(name, path,
1580 known_dvips_graphics_formats);
1581 string const pdftex_name =
1582 find_file(name, path,
1583 known_pdftex_graphics_formats);
1584 if (!dvips_name.empty()) {
1585 if (!pdftex_name.empty()) {
1586 cerr << "This file contains the "
1588 "\"\\includegraphics{"
1590 "However, files\n\""
1591 << dvips_name << "\" and\n\""
1592 << pdftex_name << "\"\n"
1593 "both exist, so I had to make a "
1594 "choice and took the first one.\n"
1595 "Please move the unwanted one "
1596 "someplace else and try again\n"
1597 "if my choice was wrong."
1601 } else if (!pdftex_name.empty())
1605 // FIXME UNICODE encoding of name and path may be
1606 // wrong (makeAbsPath expects utf8)
1607 if (makeAbsPath(name, path).exists())
1608 fix_relative_filename(name);
1610 cerr << "Warning: Could not find graphics file '"
1611 << name << "'." << endl;
1613 context.check_layout(os);
1614 begin_inset(os, "Graphics ");
1615 os << "\n\tfilename " << name << '\n';
1616 if (opts.find("width") != opts.end())
1618 << translate_len(opts["width"]) << '\n';
1619 if (opts.find("height") != opts.end())
1621 << translate_len(opts["height"]) << '\n';
1622 if (opts.find("scale") != opts.end()) {
1623 istringstream iss(opts["scale"]);
1627 os << "\tscale " << val << '\n';
1629 if (opts.find("angle") != opts.end())
1630 os << "\trotateAngle "
1631 << opts["angle"] << '\n';
1632 if (opts.find("origin") != opts.end()) {
1634 string const opt = opts["origin"];
1635 if (opt.find('l') != string::npos) ss << "left";
1636 if (opt.find('r') != string::npos) ss << "right";
1637 if (opt.find('c') != string::npos) ss << "center";
1638 if (opt.find('t') != string::npos) ss << "Top";
1639 if (opt.find('b') != string::npos) ss << "Bottom";
1640 if (opt.find('B') != string::npos) ss << "Baseline";
1641 if (!ss.str().empty())
1642 os << "\trotateOrigin " << ss.str() << '\n';
1644 cerr << "Warning: Ignoring unknown includegraphics origin argument '" << opt << "'\n";
1646 if (opts.find("keepaspectratio") != opts.end())
1647 os << "\tkeepAspectRatio\n";
1648 if (opts.find("clip") != opts.end())
1650 if (opts.find("draft") != opts.end())
1652 if (opts.find("bb") != opts.end())
1653 os << "\tBoundingBox "
1654 << opts["bb"] << '\n';
1655 int numberOfbbOptions = 0;
1656 if (opts.find("bbllx") != opts.end())
1657 numberOfbbOptions++;
1658 if (opts.find("bblly") != opts.end())
1659 numberOfbbOptions++;
1660 if (opts.find("bburx") != opts.end())
1661 numberOfbbOptions++;
1662 if (opts.find("bbury") != opts.end())
1663 numberOfbbOptions++;
1664 if (numberOfbbOptions == 4)
1665 os << "\tBoundingBox "
1666 << opts["bbllx"] << " " << opts["bblly"] << " "
1667 << opts["bburx"] << " " << opts["bbury"] << '\n';
1668 else if (numberOfbbOptions > 0)
1669 cerr << "Warning: Ignoring incomplete includegraphics boundingbox arguments.\n";
1670 numberOfbbOptions = 0;
1671 if (opts.find("natwidth") != opts.end())
1672 numberOfbbOptions++;
1673 if (opts.find("natheight") != opts.end())
1674 numberOfbbOptions++;
1675 if (numberOfbbOptions == 2)
1676 os << "\tBoundingBox 0bp 0bp "
1677 << opts["natwidth"] << " " << opts["natheight"] << '\n';
1678 else if (numberOfbbOptions > 0)
1679 cerr << "Warning: Ignoring incomplete includegraphics boundingbox arguments.\n";
1680 ostringstream special;
1681 if (opts.find("hiresbb") != opts.end())
1682 special << "hiresbb,";
1683 if (opts.find("trim") != opts.end())
1685 if (opts.find("viewport") != opts.end())
1686 special << "viewport=" << opts["viewport"] << ',';
1687 if (opts.find("totalheight") != opts.end())
1688 special << "totalheight=" << opts["totalheight"] << ',';
1689 if (opts.find("type") != opts.end())
1690 special << "type=" << opts["type"] << ',';
1691 if (opts.find("ext") != opts.end())
1692 special << "ext=" << opts["ext"] << ',';
1693 if (opts.find("read") != opts.end())
1694 special << "read=" << opts["read"] << ',';
1695 if (opts.find("command") != opts.end())
1696 special << "command=" << opts["command"] << ',';
1697 string s_special = special.str();
1698 if (!s_special.empty()) {
1699 // We had special arguments. Remove the trailing ','.
1700 os << "\tspecial " << s_special.substr(0, s_special.size() - 1) << '\n';
1702 // TODO: Handle the unknown settings better.
1703 // Warn about invalid options.
1704 // Check whether some option was given twice.
1708 else if (t.cs() == "footnote" ||
1709 (t.cs() == "thanks" && context.layout->intitle)) {
1711 context.check_layout(os);
1712 begin_inset(os, "Foot\n");
1713 os << "status collapsed\n\n";
1714 parse_text_in_inset(p, os, FLAG_ITEM, false, context);
1718 else if (t.cs() == "marginpar") {
1720 context.check_layout(os);
1721 begin_inset(os, "Marginal\n");
1722 os << "status collapsed\n\n";
1723 parse_text_in_inset(p, os, FLAG_ITEM, false, context);
1727 else if (t.cs() == "ensuremath") {
1729 context.check_layout(os);
1730 string const s = p.verbatim_item();
1731 if (s == "\xb1" || s == "\xb3" || s == "\xb2" || s == "\xb5")
1734 handle_ert(os, "\\ensuremath{" + s + "}",
1738 else if (t.cs() == "hfill") {
1739 context.check_layout(os);
1740 os << "\n\\hfill\n";
1745 else if (t.cs() == "makeindex" || t.cs() == "maketitle") {
1746 // FIXME: Somehow prevent title layouts if
1747 // "maketitle" was not found
1749 skip_braces(p); // swallow this
1752 else if (t.cs() == "tableofcontents") {
1754 context.check_layout(os);
1755 begin_inset(os, "LatexCommand \\tableofcontents\n");
1757 skip_braces(p); // swallow this
1760 else if (t.cs() == "listoffigures") {
1762 context.check_layout(os);
1763 begin_inset(os, "FloatList figure\n");
1765 skip_braces(p); // swallow this
1768 else if (t.cs() == "listoftables") {
1770 context.check_layout(os);
1771 begin_inset(os, "FloatList table\n");
1773 skip_braces(p); // swallow this
1776 else if (t.cs() == "listof") {
1777 p.skip_spaces(true);
1778 string const name = p.get_token().asString();
1779 if (context.textclass.floats().typeExist(name)) {
1780 context.check_layout(os);
1781 begin_inset(os, "FloatList ");
1784 p.get_token(); // swallow second arg
1786 handle_ert(os, "\\listof{" + name + "}", context);
1789 else if (t.cs() == "textrm")
1790 parse_text_attributes(p, os, FLAG_ITEM, outer,
1791 context, "\\family",
1792 context.font.family, "roman");
1794 else if (t.cs() == "textsf")
1795 parse_text_attributes(p, os, FLAG_ITEM, outer,
1796 context, "\\family",
1797 context.font.family, "sans");
1799 else if (t.cs() == "texttt")
1800 parse_text_attributes(p, os, FLAG_ITEM, outer,
1801 context, "\\family",
1802 context.font.family, "typewriter");
1804 else if (t.cs() == "textmd")
1805 parse_text_attributes(p, os, FLAG_ITEM, outer,
1806 context, "\\series",
1807 context.font.series, "medium");
1809 else if (t.cs() == "textbf")
1810 parse_text_attributes(p, os, FLAG_ITEM, outer,
1811 context, "\\series",
1812 context.font.series, "bold");
1814 else if (t.cs() == "textup")
1815 parse_text_attributes(p, os, FLAG_ITEM, outer,
1817 context.font.shape, "up");
1819 else if (t.cs() == "textit")
1820 parse_text_attributes(p, os, FLAG_ITEM, outer,
1822 context.font.shape, "italic");
1824 else if (t.cs() == "textsl")
1825 parse_text_attributes(p, os, FLAG_ITEM, outer,
1827 context.font.shape, "slanted");
1829 else if (t.cs() == "textsc")
1830 parse_text_attributes(p, os, FLAG_ITEM, outer,
1832 context.font.shape, "smallcaps");
1834 else if (t.cs() == "textnormal" || t.cs() == "normalfont") {
1835 context.check_layout(os);
1836 TeXFont oldFont = context.font;
1837 context.font.init();
1838 context.font.size = oldFont.size;
1839 os << "\n\\family " << context.font.family << "\n";
1840 os << "\n\\series " << context.font.series << "\n";
1841 os << "\n\\shape " << context.font.shape << "\n";
1842 if (t.cs() == "textnormal") {
1843 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
1844 output_font_change(os, context.font, oldFont);
1845 context.font = oldFont;
1847 eat_whitespace(p, os, context, false);
1850 else if (t.cs() == "underbar") {
1851 // Do NOT handle \underline.
1852 // \underbar cuts through y, g, q, p etc.,
1853 // \underline does not.
1854 context.check_layout(os);
1855 os << "\n\\bar under\n";
1856 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
1857 context.check_layout(os);
1858 os << "\n\\bar default\n";
1861 else if (t.cs() == "emph" || t.cs() == "noun") {
1862 context.check_layout(os);
1863 os << "\n\\" << t.cs() << " on\n";
1864 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
1865 context.check_layout(os);
1866 os << "\n\\" << t.cs() << " default\n";
1869 else if (use_natbib &&
1870 is_known(t.cs(), known_natbib_commands) &&
1871 ((t.cs() != "citefullauthor" &&
1872 t.cs() != "citeyear" &&
1873 t.cs() != "citeyearpar") ||
1874 p.next_token().asInput() != "*")) {
1875 context.check_layout(os);
1877 // \citet[before][after]{a} \citet[after][before]{a}
1878 // \citet[before][]{a} \citet[][before]{a}
1879 // \citet[after]{a} \citet[after]{a}
1880 // \citet{a} \citet{a}
1881 string command = '\\' + t.cs();
1882 if (p.next_token().asInput() == "*") {
1886 if (command == "\\citefullauthor")
1887 // alternative name for "\\citeauthor*"
1888 command = "\\citeauthor*";
1890 // text before the citation
1892 // text after the citation
1894 get_cite_arguments(p, true, before, after);
1896 if (command == "\\cite") {
1897 // \cite without optional argument means
1898 // \citet, \cite with at least one optional
1899 // argument means \citep.
1900 if (before.empty() && after.empty())
1901 command = "\\citet";
1903 command = "\\citep";
1905 if (before.empty() && after == "[]")
1906 // avoid \citet[]{a}
1908 else if (before == "[]" && after == "[]") {
1909 // avoid \citet[][]{a}
1913 begin_inset(os, "LatexCommand ");
1914 os << command << after << before
1915 << '{' << p.verbatim_item() << "}\n";
1919 else if (use_jurabib &&
1920 is_known(t.cs(), known_jurabib_commands)) {
1921 context.check_layout(os);
1922 string const command = '\\' + t.cs();
1923 char argumentOrder = '\0';
1924 vector<string> const & options = used_packages["jurabib"];
1925 if (std::find(options.begin(), options.end(),
1926 "natbiborder") != options.end())
1927 argumentOrder = 'n';
1928 else if (std::find(options.begin(), options.end(),
1929 "jurabiborder") != options.end())
1930 argumentOrder = 'j';
1932 // text before the citation
1934 // text after the citation
1936 get_cite_arguments(p, argumentOrder != 'j', before, after);
1938 string const citation = p.verbatim_item();
1939 if (!before.empty() && argumentOrder == '\0') {
1940 cerr << "Warning: Assuming argument order "
1941 "of jurabib version 0.6 for\n'"
1942 << command << before << after << '{'
1943 << citation << "}'.\n"
1944 "Add 'jurabiborder' to the jurabib "
1945 "package options if you used an\n"
1946 "earlier jurabib version." << endl;
1948 begin_inset(os, "LatexCommand ");
1949 os << command << after << before
1950 << '{' << citation << "}\n";
1954 else if (is_known(t.cs(), known_latex_commands)) {
1955 // This needs to be after the check for natbib and
1956 // jurabib commands, because "cite" has different
1957 // arguments with natbib and jurabib.
1958 context.check_layout(os);
1959 begin_inset(os, "LatexCommand ");
1960 os << '\\' << t.cs();
1961 // lyx cannot handle newlines in a latex command
1962 // FIXME: Move the substitution into parser::getOpt()?
1963 os << subst(p.getOpt(), "\n", " ");
1964 os << subst(p.getOpt(), "\n", " ");
1965 os << '{' << subst(p.verbatim_item(), "\n", " ") << "}\n";
1969 else if (is_known(t.cs(), known_quotes)) {
1970 char const * const * where = is_known(t.cs(), known_quotes);
1971 context.check_layout(os);
1972 begin_inset(os, "Quotes ");
1973 os << known_coded_quotes[where - known_quotes];
1975 // LyX adds {} after the quote, so we have to eat
1976 // spaces here if there are any before a possible
1978 eat_whitespace(p, os, context, false);
1982 else if (is_known(t.cs(), known_sizes) &&
1983 context.new_layout_allowed) {
1984 char const * const * where = is_known(t.cs(), known_sizes);
1985 context.check_layout(os);
1986 TeXFont const oldFont = context.font;
1987 context.font.size = known_coded_sizes[where - known_sizes];
1988 output_font_change(os, oldFont, context.font);
1989 eat_whitespace(p, os, context, false);
1992 else if (is_known(t.cs(), known_font_families) &&
1993 context.new_layout_allowed) {
1994 char const * const * where =
1995 is_known(t.cs(), known_font_families);
1996 context.check_layout(os);
1997 TeXFont const oldFont = context.font;
1998 context.font.family =
1999 known_coded_font_families[where - known_font_families];
2000 output_font_change(os, oldFont, context.font);
2001 eat_whitespace(p, os, context, false);
2004 else if (is_known(t.cs(), known_font_series) &&
2005 context.new_layout_allowed) {
2006 char const * const * where =
2007 is_known(t.cs(), known_font_series);
2008 context.check_layout(os);
2009 TeXFont const oldFont = context.font;
2010 context.font.series =
2011 known_coded_font_series[where - known_font_series];
2012 output_font_change(os, oldFont, context.font);
2013 eat_whitespace(p, os, context, false);
2016 else if (is_known(t.cs(), known_font_shapes) &&
2017 context.new_layout_allowed) {
2018 char const * const * where =
2019 is_known(t.cs(), known_font_shapes);
2020 context.check_layout(os);
2021 TeXFont const oldFont = context.font;
2022 context.font.shape =
2023 known_coded_font_shapes[where - known_font_shapes];
2024 output_font_change(os, oldFont, context.font);
2025 eat_whitespace(p, os, context, false);
2027 else if (is_known(t.cs(), known_old_font_families) &&
2028 context.new_layout_allowed) {
2029 char const * const * where =
2030 is_known(t.cs(), known_old_font_families);
2031 context.check_layout(os);
2032 TeXFont const oldFont = context.font;
2033 context.font.init();
2034 context.font.size = oldFont.size;
2035 context.font.family =
2036 known_coded_font_families[where - known_old_font_families];
2037 output_font_change(os, oldFont, context.font);
2038 eat_whitespace(p, os, context, false);
2041 else if (is_known(t.cs(), known_old_font_series) &&
2042 context.new_layout_allowed) {
2043 char const * const * where =
2044 is_known(t.cs(), known_old_font_series);
2045 context.check_layout(os);
2046 TeXFont const oldFont = context.font;
2047 context.font.init();
2048 context.font.size = oldFont.size;
2049 context.font.series =
2050 known_coded_font_series[where - known_old_font_series];
2051 output_font_change(os, oldFont, context.font);
2052 eat_whitespace(p, os, context, false);
2055 else if (is_known(t.cs(), known_old_font_shapes) &&
2056 context.new_layout_allowed) {
2057 char const * const * where =
2058 is_known(t.cs(), known_old_font_shapes);
2059 context.check_layout(os);
2060 TeXFont const oldFont = context.font;
2061 context.font.init();
2062 context.font.size = oldFont.size;
2063 context.font.shape =
2064 known_coded_font_shapes[where - known_old_font_shapes];
2065 output_font_change(os, oldFont, context.font);
2066 eat_whitespace(p, os, context, false);
2069 else if (t.cs() == "selectlanguage") {
2070 context.check_layout(os);
2071 // save the language for the case that a \foreignlanguage is used
2072 selectlang = subst(p.verbatim_item(), "\n", " ");
2073 os << "\\lang " << selectlang << "\n";
2077 else if (t.cs() == "foreignlanguage") {
2078 context.check_layout(os);
2079 os << "\n\\lang " << subst(p.verbatim_item(), "\n", " ") << "\n";
2080 os << subst(p.verbatim_item(), "\n", " ");
2081 // set back to last selectlanguage
2082 os << "\n\\lang " << selectlang << "\n";
2085 else if (t.cs() == "inputencoding")
2086 // write nothing because this is done by LyX using the "\lang"
2087 // information given by selectlanguage and foreignlanguage
2088 subst(p.verbatim_item(), "\n", " ");
2090 else if (t.cs() == "LyX" || t.cs() == "TeX"
2091 || t.cs() == "LaTeX") {
2092 context.check_layout(os);
2094 skip_braces(p); // eat {}
2097 else if (t.cs() == "LaTeXe") {
2098 context.check_layout(os);
2100 skip_braces(p); // eat {}
2103 else if (t.cs() == "ldots") {
2104 context.check_layout(os);
2106 os << "\\SpecialChar \\ldots{}\n";
2109 else if (t.cs() == "lyxarrow") {
2110 context.check_layout(os);
2111 os << "\\SpecialChar \\menuseparator\n";
2115 else if (t.cs() == "textcompwordmark") {
2116 context.check_layout(os);
2117 os << "\\SpecialChar \\textcompwordmark{}\n";
2121 else if (t.cs() == "@" && p.next_token().asInput() == ".") {
2122 context.check_layout(os);
2123 os << "\\SpecialChar \\@.\n";
2127 else if (t.cs() == "-") {
2128 context.check_layout(os);
2129 os << "\\SpecialChar \\-\n";
2132 else if (t.cs() == "textasciitilde") {
2133 context.check_layout(os);
2138 else if (t.cs() == "textasciicircum") {
2139 context.check_layout(os);
2144 else if (t.cs() == "textbackslash") {
2145 context.check_layout(os);
2146 os << "\n\\backslash\n";
2150 else if (t.cs() == "_" || t.cs() == "&" || t.cs() == "#"
2151 || t.cs() == "$" || t.cs() == "{" || t.cs() == "}"
2153 context.check_layout(os);
2157 else if (t.cs() == "char") {
2158 context.check_layout(os);
2159 if (p.next_token().character() == '`') {
2161 if (p.next_token().cs() == "\"") {
2166 handle_ert(os, "\\char`", context);
2169 handle_ert(os, "\\char", context);
2173 else if (t.cs() == "verb") {
2174 context.check_layout(os);
2175 char const delimiter = p.next_token().character();
2176 string const arg = p.getArg(delimiter, delimiter);
2178 oss << "\\verb" << delimiter << arg << delimiter;
2179 handle_ert(os, oss.str(), context);
2182 else if (t.cs() == "\"") {
2183 context.check_layout(os);
2184 string const name = p.verbatim_item();
2185 if (name == "a") os << '\xe4';
2186 else if (name == "o") os << '\xf6';
2187 else if (name == "u") os << '\xfc';
2188 else if (name == "A") os << '\xc4';
2189 else if (name == "O") os << '\xd6';
2190 else if (name == "U") os << '\xdc';
2191 else handle_ert(os, "\"{" + name + "}", context);
2194 // Problem: \= creates a tabstop inside the tabbing environment
2195 // and else an accent. In the latter case we really would want
2196 // \={o} instead of \= o.
2197 else if (t.cs() == "=" && (flags & FLAG_TABBING))
2198 handle_ert(os, t.asInput(), context);
2200 else if (t.cs() == "H" || t.cs() == "c" || t.cs() == "^"
2201 || t.cs() == "'" || t.cs() == "`"
2202 || t.cs() == "~" || t.cs() == "." || t.cs() == "=") {
2203 // we need the trim as the LyX parser chokes on such spaces
2204 // The argument of InsetLatexAccent is parsed as a
2205 // subset of LaTeX, so don't parse anything here,
2206 // but use the raw argument.
2207 // Otherwise we would convert \~{\i} wrongly.
2208 // This will of course not translate \~{\ss} to \~{ß},
2209 // but that does at least compile and does only look
2210 // strange on screen.
2211 context.check_layout(os);
2212 os << "\\i \\" << t.cs() << "{"
2213 << trim(p.verbatim_item(), " ")
2217 else if (t.cs() == "ss") {
2218 context.check_layout(os);
2220 skip_braces(p); // eat {}
2223 else if (t.cs() == "i" || t.cs() == "j" || t.cs() == "l" ||
2225 context.check_layout(os);
2226 os << "\\i \\" << t.cs() << "{}\n";
2227 skip_braces(p); // eat {}
2230 else if (t.cs() == "\\") {
2231 context.check_layout(os);
2232 string const next = p.next_token().asInput();
2234 handle_ert(os, "\\\\" + p.getOpt(), context);
2235 else if (next == "*") {
2237 handle_ert(os, "\\\\*" + p.getOpt(), context);
2240 os << "\n\\newline\n";
2244 else if (t.cs() == "newline" ||
2245 t.cs() == "linebreak") {
2246 context.check_layout(os);
2247 os << "\n\\" << t.cs() << "\n";
2248 skip_braces(p); // eat {}
2251 else if (t.cs() == "href") {
2252 context.check_layout(os);
2253 begin_inset(os, "CommandInset ");
2254 os << t.cs() << "\n";
2255 os << "LatexCommand " << t.cs() << "\n";
2258 // the first argument is "type:target", "type:" is optional
2259 // the second argument the name
2260 string href_target = subst(p.verbatim_item(), "\n", " ");
2261 string href_name = subst(p.verbatim_item(), "\n", " ");
2263 // search for the ":" to divide type from target
2264 if ((pos = href_target.find(":", 0)) != string::npos){
2265 href_type = href_target;
2266 href_type.erase(pos + 1, href_type.length());
2267 href_target.erase(0, pos + 1);
2270 os << "name " << '"' << href_name << '"' << "\n";
2271 os << "target " << '"' << href_target << '"' << "\n";
2273 os << "type " << '"' << href_type << '"' << "\n";
2277 else if (t.cs() == "input" || t.cs() == "include"
2278 || t.cs() == "verbatiminput") {
2279 string name = '\\' + t.cs();
2280 if (t.cs() == "verbatiminput"
2281 && p.next_token().asInput() == "*")
2282 name += p.get_token().asInput();
2283 context.check_layout(os);
2284 begin_inset(os, "Include ");
2285 string filename(normalize_filename(p.getArg('{', '}')));
2286 string const path = getMasterFilePath();
2287 // We want to preserve relative / absolute filenames,
2288 // therefore path is only used for testing
2289 // FIXME UNICODE encoding of filename and path may be
2290 // wrong (makeAbsPath expects utf8)
2291 if ((t.cs() == "include" || t.cs() == "input") &&
2292 !makeAbsPath(filename, path).exists()) {
2293 // The file extension is probably missing.
2294 // Now try to find it out.
2295 string const tex_name =
2296 find_file(filename, path,
2297 known_tex_extensions);
2298 if (!tex_name.empty())
2299 filename = tex_name;
2301 // FIXME UNICODE encoding of filename and path may be
2302 // wrong (makeAbsPath expects utf8)
2303 if (makeAbsPath(filename, path).exists()) {
2304 string const abstexname =
2305 makeAbsPath(filename, path).absFilename();
2306 string const abslyxname =
2307 changeExtension(abstexname, ".lyx");
2308 fix_relative_filename(filename);
2309 string const lyxname =
2310 changeExtension(filename, ".lyx");
2311 if (t.cs() != "verbatiminput" &&
2312 tex2lyx(abstexname, FileName(abslyxname))) {
2313 os << name << '{' << lyxname << "}\n";
2315 os << name << '{' << filename << "}\n";
2318 cerr << "Warning: Could not find included file '"
2319 << filename << "'." << endl;
2320 os << name << '{' << filename << "}\n";
2322 os << "preview false\n";
2326 else if (t.cs() == "bibliographystyle") {
2327 // store new bibliographystyle
2328 bibliographystyle = p.verbatim_item();
2329 // output new bibliographystyle.
2330 // This is only necessary if used in some other macro than \bibliography.
2331 handle_ert(os, "\\bibliographystyle{" + bibliographystyle + "}", context);
2334 else if (t.cs() == "bibliography") {
2335 context.check_layout(os);
2336 begin_inset(os, "LatexCommand ");
2338 // Do we have a bibliographystyle set?
2339 if (!bibliographystyle.empty()) {
2340 os << '[' << bibliographystyle << ']';
2342 os << '{' << p.verbatim_item() << "}\n";
2346 else if (t.cs() == "parbox")
2347 parse_box(p, os, FLAG_ITEM, outer, context, true);
2349 else if (t.cs() == "smallskip" ||
2350 t.cs() == "medskip" ||
2351 t.cs() == "bigskip" ||
2352 t.cs() == "vfill") {
2353 context.check_layout(os);
2354 begin_inset(os, "VSpace ");
2360 else if (is_known(t.cs(), known_spaces)) {
2361 char const * const * where = is_known(t.cs(), known_spaces);
2362 context.check_layout(os);
2363 begin_inset(os, "InsetSpace ");
2364 os << '\\' << known_coded_spaces[where - known_spaces]
2366 // LaTeX swallows whitespace after all spaces except
2367 // "\\,". We have to do that here, too, because LyX
2368 // adds "{}" which would make the spaces significant.
2370 eat_whitespace(p, os, context, false);
2371 // LyX adds "{}" after all spaces except "\\ " and
2372 // "\\,", so we have to remove "{}".
2373 // "\\,{}" is equivalent to "\\," in LaTeX, so we
2374 // remove the braces after "\\,", too.
2379 else if (t.cs() == "newpage" ||
2380 t.cs() == "pagebreak" ||
2381 t.cs() == "clearpage" ||
2382 t.cs() == "cleardoublepage") {
2383 context.check_layout(os);
2384 os << "\n\\" << t.cs() << "\n";
2385 skip_braces(p); // eat {}
2388 else if (t.cs() == "newcommand" ||
2389 t.cs() == "providecommand" ||
2390 t.cs() == "renewcommand") {
2391 // these could be handled by parse_command(), but
2392 // we need to call add_known_command() here.
2393 string name = t.asInput();
2394 if (p.next_token().asInput() == "*") {
2395 // Starred form. Eat '*'
2399 string const command = p.verbatim_item();
2400 string const opt1 = p.getOpt();
2401 string const opt2 = p.getFullOpt();
2402 add_known_command(command, opt1, !opt2.empty());
2403 string const ert = name + '{' + command + '}' +
2405 '{' + p.verbatim_item() + '}';
2407 context.check_layout(os);
2408 begin_inset(os, "FormulaMacro");
2413 else if (t.cs() == "vspace") {
2414 bool starred = false;
2415 if (p.next_token().asInput() == "*") {
2419 string const length = p.verbatim_item();
2422 bool valid = splitLatexLength(length, valstring, unit);
2423 bool known_vspace = false;
2424 bool known_unit = false;
2427 istringstream iss(valstring);
2430 if (unit == "\\smallskipamount") {
2432 known_vspace = true;
2433 } else if (unit == "\\medskipamount") {
2435 known_vspace = true;
2436 } else if (unit == "\\bigskipamount") {
2438 known_vspace = true;
2439 } else if (unit == "\\fill") {
2441 known_vspace = true;
2444 if (!known_vspace) {
2445 switch (unitFromString(unit)) {
2466 if (known_unit || known_vspace) {
2467 // Literal length or known variable
2468 context.check_layout(os);
2469 begin_inset(os, "VSpace ");
2477 // LyX can't handle other length variables in Inset VSpace
2478 string name = t.asInput();
2483 handle_ert(os, name + '{' + unit + '}', context);
2484 else if (value == -1.0)
2485 handle_ert(os, name + "{-" + unit + '}', context);
2487 handle_ert(os, name + '{' + valstring + unit + '}', context);
2489 handle_ert(os, name + '{' + length + '}', context);
2494 //cerr << "#: " << t << " mode: " << mode << endl;
2495 // heuristic: read up to next non-nested space
2497 string s = t.asInput();
2498 string z = p.verbatim_item();
2499 while (p.good() && z != " " && z.size()) {
2500 //cerr << "read: " << z << endl;
2502 z = p.verbatim_item();
2504 cerr << "found ERT: " << s << endl;
2505 handle_ert(os, s + ' ', context);
2507 string name = t.asInput();
2508 if (p.next_token().asInput() == "*") {
2509 // Starred commands like \vspace*{}
2510 p.get_token(); // Eat '*'
2513 if (! parse_command(name, p, os, outer, context))
2514 handle_ert(os, name, context);
2517 if (flags & FLAG_LEAVE) {
2518 flags &= ~FLAG_LEAVE;