2 * \file tex2lyx/text.cpp
3 * This file is part of LyX, the document processor.
4 * Licence details can be found in the file COPYING.
7 * \author Jean-Marc Lasgouttes
9 * Full author contact details are available in file CREDITS.
19 #include "FloatList.h"
23 #include "support/lstrings.h"
24 #include "support/convert.h"
25 #include "support/filetools.h"
27 #include <boost/tuple/tuple.hpp>
39 using std::ostringstream;
40 using std::istringstream;
46 using support::addExtension;
47 using support::changeExtension;
48 using support::FileName;
49 using support::makeAbsPath;
50 using support::makeRelPath;
52 using support::suffixIs;
53 using support::contains;
// Parses the contents of an inset. A fresh top-level Context is created so
// the inset's layout bookkeeping is independent of the caller's; only the
// current font is inherited from the parent context.
57 void parse_text_in_inset(Parser & p, ostream & os, unsigned flags, bool outer,
58 Context const & context)
60 Context newcontext(true, context.textclass);
61 newcontext.font = context.font;
62 parse_text(p, os, flags, outer, newcontext);
// Close any layout that was left open inside the inset.
63 newcontext.check_end_layout(os);
69 /// parses a paragraph snippet, useful for example for \\emph{...}
70 void parse_text_snippet(Parser & p, ostream & os, unsigned flags, bool outer,
73 Context newcontext(context);
74 // Don't inherit the extra stuff
75 newcontext.extra_stuff.clear();
76 parse_text(p, os, flags, outer, newcontext);
77 // Make sure that we don't create invalid .lyx files
// Copy the layout flags back so the caller's context reflects whatever
// layout state the snippet left behind.
78 context.need_layout = newcontext.need_layout;
79 context.need_end_layout = newcontext.need_end_layout;
84 * Thin wrapper around parse_text_snippet() using a string.
86 * We completely ignore \c context.need_layout and \c context.need_end_layout,
87 * because our return value is not used directly (otherwise the stream version
88 * of parse_text_snippet() could be used). That means that the caller needs
89 * to do layout management manually.
90 * This is intended to parse text that does not create any layout changes.
92 string parse_text_snippet(Parser & p, unsigned flags, const bool outer,
// Layout changes are deliberately disabled (see the comment above):
// the snippet is captured into a string, so no layout markers may leak in.
95 Context newcontext(context);
96 newcontext.need_layout = false;
97 newcontext.need_end_layout = false;
98 newcontext.new_layout_allowed = false;
99 // Avoid warning by Context::~Context()
100 newcontext.extra_stuff.clear();
102 parse_text_snippet(p, os, flags, outer, newcontext);
// Lookup tables of LaTeX names known to the converter. Each array is
// null-terminated; the "*_coded_*" arrays map 1:1 (same index) onto their
// LaTeX-name counterparts and give the corresponding .lyx token.
107 char const * const known_latex_commands[] = { "ref", "cite", "label", "href",
108 "index", "printindex", "pageref", "url", "vref", "vpageref", "prettyref",
113 * We can't put these into known_latex_commands because the argument order
114 * is reversed in lyx if there are 2 arguments.
115 * The starred forms are also known.
117 char const * const known_natbib_commands[] = { "cite", "citet", "citep",
118 "citealt", "citealp", "citeauthor", "citeyear", "citeyearpar",
119 "citefullauthor", "Citet", "Citep", "Citealt", "Citealp", "Citeauthor", 0 };
123 * We can't put these into known_latex_commands because the argument order
124 * is reversed in lyx if there are 2 arguments.
125 * No starred form other than "cite*" known.
127 char const * const known_jurabib_commands[] = { "cite", "citet", "citep",
128 "citealt", "citealp", "citeauthor", "citeyear", "citeyearpar",
129 // jurabib commands not (yet) supported by LyX:
131 // "footcite", "footcitet", "footcitep", "footcitealt", "footcitealp",
132 // "footciteauthor", "footciteyear", "footciteyearpar",
133 "citefield", "citetitle", "cite*", 0 };
135 /// LaTeX names for quotes
136 char const * const known_quotes[] = { "glqq", "grqq", "quotedblbase",
137 "textquotedblleft", "quotesinglbase", "guilsinglleft", "guilsinglright", 0};
139 /// the same as known_quotes with .lyx names
140 char const * const known_coded_quotes[] = { "gld", "grd", "gld",
141 "grd", "gls", "fls", "frs", 0};
143 /// LaTeX names for font sizes
144 char const * const known_sizes[] = { "tiny", "scriptsize", "footnotesize",
145 "small", "normalsize", "large", "Large", "LARGE", "huge", "Huge", 0};
147 /// the same as known_sizes with .lyx names
148 char const * const known_coded_sizes[] = { "tiny", "scriptsize", "footnotesize",
149 "small", "normal", "large", "larger", "largest", "huge", "giant", 0};
151 /// LaTeX 2.09 names for font families
152 char const * const known_old_font_families[] = { "rm", "sf", "tt", 0};
154 /// LaTeX names for font families
155 char const * const known_font_families[] = { "rmfamily", "sffamily",
158 /// the same as known_old_font_families and known_font_families with .lyx names
159 char const * const known_coded_font_families[] = { "roman", "sans",
162 /// LaTeX 2.09 names for font series
163 char const * const known_old_font_series[] = { "bf", 0};
165 /// LaTeX names for font series
166 char const * const known_font_series[] = { "bfseries", "mdseries", 0};
168 /// the same as known_old_font_series and known_font_series with .lyx names
169 char const * const known_coded_font_series[] = { "bold", "medium", 0};
171 /// LaTeX 2.09 names for font shapes
172 char const * const known_old_font_shapes[] = { "it", "sl", "sc", 0};
174 /// LaTeX names for font shapes
175 char const * const known_font_shapes[] = { "itshape", "slshape", "scshape",
178 /// the same as known_old_font_shapes and known_font_shapes with .lyx names
179 char const * const known_coded_font_shapes[] = { "italic", "slanted",
180 "smallcaps", "up", 0};
183 * Graphics file extensions known by the dvips driver of the graphics package.
184 * These extensions are used to complete the filename of an included
185 * graphics file if it does not contain an extension.
186 * The order must be the same that latex uses to find a file, because we
187 * will use the first extension that matches.
188 * This is only an approximation for the common cases. If we would want to
189 * do it right in all cases, we would need to know which graphics driver is
190 * used and know the extensions of every driver of the graphics package.
192 char const * const known_dvips_graphics_formats[] = {"eps", "ps", "eps.gz",
193 "ps.gz", "eps.Z", "ps.Z", 0};
196 * Graphics file extensions known by the pdftex driver of the graphics package.
197 * \sa known_dvips_graphics_formats
199 char const * const known_pdftex_graphics_formats[] = {"png", "pdf", "jpg",
203 * Known file extensions for TeX files as used by \\include.
205 char const * const known_tex_extensions[] = {"tex", 0};
207 /// spaces known by InsetSpace
208 char const * const known_spaces[] = { " ", "space", ",", "thinspace", "quad",
209 "qquad", "enspace", "enskip", "negthinspace", 0};
211 /// the same as known_spaces with .lyx names
212 char const * const known_coded_spaces[] = { "space{}", "space{}",
213 "thinspace{}", "thinspace{}", "quad{}", "qquad{}", "enspace{}", "enskip{}",
214 "negthinspace{}", 0};
217 /// splits "x=z, y=b" into a map
218 map<string, string> split_map(string const & s)
220 map<string, string> res;
223 for (size_t i = 0; i < v.size(); ++i) {
// NOTE(review): if a token contains no '=', pos is string::npos;
// substr(0, npos) yields the whole token and substr(npos + 1) wraps
// to substr(0) — verify that malformed tokens are intended to map
// onto themselves rather than being rejected.
224 size_t const pos = v[i].find('=');
225 string const index = v[i].substr(0, pos);
226 string const value = v[i].substr(pos + 1, string::npos);
// Keys and values are stored whitespace-trimmed.
227 res[trim(index)] = trim(value);
234 * Split a LaTeX length into value and unit.
235 * The latter can be a real unit like "pt", or a latex length variable
236 * like "\textwidth". The unit may contain additional stuff like glue
237 * lengths, but we don't care, because such lengths are ERT anyway.
238 * \returns true if \p value and \p unit are valid.
240 bool splitLatexLength(string const & len, string & value, string & unit)
// i marks the first character that is not part of the numeric factor.
244 const string::size_type i = len.find_first_not_of(" -+0123456789.,");
245 //'4,5' is a valid LaTeX length number. Change it to '4.5'
246 string const length = subst(len, ',', '.');
247 if (i == string::npos)
250 if (len[0] == '\\') {
251 // We had something like \textwidth without a factor
257 value = trim(string(length, 0, i));
261 // 'cM' is a valid LaTeX length unit. Change it to 'cm'
// Length macros like \textwidth are case sensitive, so only lowercase
// the unit when it is not a macro.
262 if (contains(len, '\\'))
263 unit = trim(string(len, i));
265 unit = support::ascii_lowercase(trim(string(len, i)));
270 /// A simple function to translate a latex length to something lyx can
271 /// understand. Not perfect, but rather best-effort.
272 bool translate_len(string const & length, string & valstring, string & unit)
274 if (!splitLatexLength(length, valstring, unit))
276 // LyX uses percent values
278 istringstream iss(valstring);
283 string const percentval = oss.str();
// Plain units (pt, cm, ...) pass through unchanged; only length macros
// below are mapped onto LyX's percent-based pseudo units.
285 if (unit.empty() || unit[0] != '\\')
// endlen keeps any trailing glue specification after the macro name.
287 string::size_type const i = unit.find(' ');
288 string const endlen = (i == string::npos) ? string() : string(unit, i);
289 if (unit == "\\textwidth") {
290 valstring = percentval;
291 unit = "text%" + endlen;
292 } else if (unit == "\\columnwidth") {
293 valstring = percentval;
294 unit = "col%" + endlen;
295 } else if (unit == "\\paperwidth") {
296 valstring = percentval;
297 unit = "page%" + endlen;
298 } else if (unit == "\\linewidth") {
299 valstring = percentval;
300 unit = "line%" + endlen;
301 } else if (unit == "\\paperheight") {
302 valstring = percentval;
303 unit = "pheight%" + endlen;
304 } else if (unit == "\\textheight") {
305 valstring = percentval;
306 unit = "theight%" + endlen;
/// Convenience overload: returns the translated length as a single string.
314 string translate_len(string const & length)
318 if (translate_len(length, value, unit))
320 // If the input is invalid, return what we have.
328 * Translates a LaTeX length into \p value, \p unit and
329 * \p special parts suitable for a box inset.
330 * The difference from translate_len() is that a box inset knows about
331 * some special "units" that are stored in \p special.
333 void translate_box_len(string const & length, string & value, string & unit, string & special)
335 if (translate_len(length, value, unit)) {
// \height, \depth, \totalheight and \width are not real units but box
// macros; store their name (without backslash) in "special" instead.
336 if (unit == "\\height" || unit == "\\depth" ||
337 unit == "\\totalheight" || unit == "\\width") {
338 special = unit.substr(1);
339 // The unit is not used, but LyX requires a dummy setting
352 * Find a file with basename \p name in path \p path and an extension
355 string find_file(string const & name, string const & path,
356 char const * const * extensions)
358 // FIXME UNICODE encoding of name and path may be wrong (makeAbsPath
// The first extension that produces an existing file wins, so the order
// of \p extensions must match LaTeX's own search order.
360 for (char const * const * what = extensions; *what; ++what) {
361 string const trial = addExtension(name, *what);
362 if (makeAbsPath(trial, path).exists())
/// Write the opening marker of a LyX inset of type \p name to \p os.
369 void begin_inset(ostream & os, string const & name)
371 os << "\n\\begin_inset " << name;
/// Write the closing marker of a LyX inset to \p os.
375 void end_inset(ostream & os)
377 os << "\n\\end_inset\n\n";
/// Skip an empty brace pair "{}" in the token stream, if one follows.
381 void skip_braces(Parser & p)
383 if (p.next_token().cat() != catBegin)
386 if (p.next_token().cat() == catEnd) {
/// Output \p s verbatim inside an ERT inset (raw LaTeX kept as-is in LyX).
/// Backslashes and newlines need special encoding in the .lyx format.
394 void handle_ert(ostream & os, string const & s, Context & context)
396 // We must have a valid layout before outputting the ERT inset.
397 context.check_layout(os);
398 Context newcontext(true, context.textclass);
399 begin_inset(os, "ERT");
400 os << "\nstatus collapsed\n";
401 newcontext.check_layout(os);
402 for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
404 os << "\n\\backslash\n";
405 else if (*it == '\n') {
406 newcontext.new_paragraph(os);
407 newcontext.check_layout(os);
411 newcontext.check_end_layout(os);
/// Output the comment \p s as an ERT inset. Unlike handle_ert() this does
/// not force a layout in the *parent* context first (see TODO below).
416 void handle_comment(ostream & os, string const & s, Context & context)
418 // TODO: Handle this better
419 Context newcontext(true, context.textclass);
420 begin_inset(os, "ERT");
421 os << "\nstatus collapsed\n";
422 newcontext.check_layout(os);
423 for (string::const_iterator it = s.begin(), et = s.end(); it != et; ++it) {
425 os << "\n\\backslash\n";
429 // make sure that our comment is the last thing on the line
430 newcontext.new_paragraph(os);
431 newcontext.check_layout(os);
432 newcontext.check_end_layout(os);
/// Unary predicate: matches a layout whose LaTeX name equals the stored name.
/// Used by findLayout() below with std::find_if.
437 class isLayout : public std::unary_function<LayoutPtr, bool> {
439 isLayout(string const name) : name_(name) {}
440 bool operator()(LayoutPtr const & ptr) const {
441 return ptr->latexname() == name_;
/// Look up the layout with LaTeX name \p name in \p textclass.
/// Returns a null LayoutPtr if no such layout exists.
448 LayoutPtr findLayout(TextClass const & textclass,
451 TextClass::const_iterator beg = textclass.begin();
452 TextClass::const_iterator end = textclass.end();
454 TextClass::const_iterator
455 it = std::find_if(beg, end, isLayout(name));
457 return (it == end) ? LayoutPtr() : *it;
461 void eat_whitespace(Parser &, ostream &, Context &, bool);
/// Output a paragraph with layout \p newlayout for a LaTeX command,
/// converting an optional [..] argument into an OptArg inset.
464 void output_command_layout(ostream & os, Parser & p, bool outer,
465 Context & parent_context,
468 parent_context.check_end_layout(os);
469 Context context(true, parent_context.textclass, newlayout,
470 parent_context.layout, parent_context.font);
471 if (parent_context.deeper_paragraph) {
472 // We are beginning a nested environment after a
473 // deeper paragraph inside the outer list environment.
474 // Therefore we don't need to output a "begin deeper".
475 context.need_end_deeper = true;
477 context.check_deeper(os);
478 context.check_layout(os);
479 if (context.layout->optionalargs > 0) {
480 eat_whitespace(p, os, context, false);
481 if (p.next_token().character() == '[') {
482 p.get_token(); // eat '['
483 begin_inset(os, "OptArg\n");
484 os << "status collapsed\n\n";
485 parse_text_in_inset(p, os, FLAG_BRACK_LAST, outer, context);
487 eat_whitespace(p, os, context, false);
490 parse_text(p, os, FLAG_ITEM, outer, context);
491 context.check_end_layout(os);
492 if (parent_context.deeper_paragraph) {
493 // We must suppress the "end deeper" because we
494 // suppressed the "begin deeper" above.
495 context.need_end_deeper = false;
497 context.check_end_deeper(os);
498 // We don't need really a new paragraph, but
499 // we must make sure that the next item gets a \begin_layout.
500 parent_context.new_paragraph(os);
505 * Output a space if necessary.
506 * This function gets called for every whitespace token.
508 * We have three cases here:
509 * 1. A space must be suppressed. Example: The lyxcode case below
510 * 2. A space may be suppressed. Example: Spaces before "\par"
511 * 3. A space must not be suppressed. Example: A space between two words
513 * We currently handle only 1. and 3 and from 2. only the case of
514 * spaces before newlines as a side effect.
516 * 2. could be used to suppress as many spaces as possible. This has two effects:
517 * - Reimporting LyX generated LaTeX files changes almost no whitespace
518 * - Superfluous whitespace from non LyX generated LaTeX files is removed.
519 * The drawback is that the logic inside the function becomes
520 * complicated, and that is the reason why it is not implemented.
522 void check_space(Parser const & p, ostream & os, Context & context)
524 Token const next = p.next_token();
525 Token const curr = p.curr_token();
526 // A space before a single newline and vice versa must be ignored
527 // LyX emits a newline before \end{lyxcode}.
528 // This newline must be ignored,
529 // otherwise LyX will add an additional protected space.
530 if (next.cat() == catSpace ||
531 next.cat() == catNewline ||
532 (next.cs() == "end" && context.layout->free_spacing && curr.cat() == catNewline)) {
535 context.check_layout(os);
541 * Parse all arguments of \p command
543 void parse_arguments(string const & command,
544 vector<ArgumentType> const & template_arguments,
545 Parser & p, ostream & os, bool outer, Context & context)
// "ert" accumulates the command plus any verbatim arguments; it is
// flushed to an ERT inset whenever a regular-LaTeX argument interrupts it.
547 string ert = command;
548 size_t no_arguments = template_arguments.size();
549 for (size_t i = 0; i < no_arguments; ++i) {
550 switch (template_arguments[i]) {
552 // This argument contains regular LaTeX
553 handle_ert(os, ert + '{', context);
554 eat_whitespace(p, os, context, false);
555 parse_text(p, os, FLAG_ITEM, outer, context);
559 // This argument may contain special characters
560 ert += '{' + p.verbatim_item() + '}';
// Flush whatever verbatim text is still pending.
567 handle_ert(os, ert, context);
572 * Check whether \p command is a known command. If yes,
573 * handle the command with all arguments.
574 * \return true if the command was parsed, false otherwise.
576 bool parse_command(string const & command, Parser & p, ostream & os,
577 bool outer, Context & context)
579 if (known_commands.find(command) != known_commands.end()) {
580 parse_arguments(command, known_commands[command], p, os,
588 /// Parses a minipage or parbox
589 void parse_box(Parser & p, ostream & os, unsigned flags, bool outer,
590 Context & parent_context, bool use_parbox)
594 // We need to set the height to the LaTeX default of 1\\totalheight
595 // for the case when no height argument is given
596 string height_value = "1";
597 string height_unit = "in";
598 string height_special = "totalheight";
// Optional arguments in order: [position][height][inner-pos].
600 if (p.next_token().asInput() == "[") {
601 position = p.getArg('[', ']');
602 if (position != "t" && position != "c" && position != "b") {
604 cerr << "invalid position for minipage/parbox" << endl;
606 if (p.next_token().asInput() == "[") {
607 latex_height = p.getArg('[', ']');
608 translate_box_len(latex_height, height_value, height_unit, height_special);
610 if (p.next_token().asInput() == "[") {
611 inner_pos = p.getArg('[', ']');
612 if (inner_pos != "c" && inner_pos != "t" &&
613 inner_pos != "b" && inner_pos != "s") {
614 inner_pos = position;
615 cerr << "invalid inner_pos for minipage/parbox"
623 string const latex_width = p.verbatim_item();
624 translate_len(latex_width, width_value, width_unit);
625 if (contains(width_unit, '\\') || contains(height_unit, '\\')) {
626 // LyX can't handle length variables
// Fall back to ERT: reconstruct the original LaTeX source of the box.
631 ss << "\\begin{minipage}";
632 if (!position.empty())
633 ss << '[' << position << ']';
634 if (!latex_height.empty())
635 ss << '[' << latex_height << ']';
636 if (!inner_pos.empty())
637 ss << '[' << inner_pos << ']';
638 ss << "{" << latex_width << "}";
641 handle_ert(os, ss.str(), parent_context);
642 parent_context.new_paragraph(os);
643 parse_text_in_inset(p, os, flags, outer, parent_context);
645 handle_ert(os, "}", parent_context);
647 handle_ert(os, "\\end{minipage}", parent_context);
649 // LyX does not like empty positions, so we have
650 // to set them to the LaTeX default values here.
651 if (position.empty())
653 if (inner_pos.empty())
654 inner_pos = position;
// Normal case: emit a frameless Box inset with the translated geometry.
655 parent_context.check_layout(os);
656 begin_inset(os, "Box Frameless\n");
657 os << "position \"" << position << "\"\n";
658 os << "hor_pos \"c\"\n";
659 os << "has_inner_box 1\n";
660 os << "inner_pos \"" << inner_pos << "\"\n";
661 os << "use_parbox " << use_parbox << "\n";
662 os << "width \"" << width_value << width_unit << "\"\n";
663 os << "special \"none\"\n";
664 os << "height \"" << height_value << height_unit << "\"\n";
665 os << "height_special \"" << height_special << "\"\n";
666 os << "status open\n\n";
667 parse_text_in_inset(p, os, flags, outer, parent_context);
669 #ifdef PRESERVE_LAYOUT
670 // lyx puts a % after the end of the minipage
671 if (p.next_token().cat() == catNewline && p.next_token().cs().size() > 1) {
673 //handle_comment(os, "%dummy", parent_context);
676 parent_context.new_paragraph(os);
678 else if (p.next_token().cat() == catSpace || p.next_token().cat() == catNewline) {
679 //handle_comment(os, "%dummy", parent_context);
682 // We add a protected space if something real follows
683 if (p.good() && p.next_token().cat() != catComment) {
684 os << "\\InsetSpace ~\n";
692 /// parse an unknown environment
693 void parse_unknown_environment(Parser & p, string const & name, ostream & os,
694 unsigned flags, bool outer,
695 Context & parent_context)
697 if (name == "tabbing")
698 // We need to remember that we have to handle '\=' specially
699 flags |= FLAG_TABBING;
701 // We need to translate font changes and paragraphs inside the
702 // environment to ERT if we have a non standard font.
703 // Otherwise things like
704 // \large\begin{foo}\huge bar\end{foo}
706 bool const specialfont =
707 (parent_context.font != parent_context.normalfont);
708 bool const new_layout_allowed = parent_context.new_layout_allowed;
710 parent_context.new_layout_allowed = false;
// The unknown environment is passed through verbatim as ERT.
711 handle_ert(os, "\\begin{" + name + "}", parent_context);
712 parse_text_snippet(p, os, flags, outer, parent_context);
713 handle_ert(os, "\\end{" + name + "}", parent_context);
// Restore the caller's layout permission.
715 parent_context.new_layout_allowed = new_layout_allowed;
/// Dispatch a \begin{...} environment to the matching LyX construct:
/// math insets, tables, floats, boxes, notes, alignment, known layouts,
/// known ERT environments, or plain ERT as a last resort.
719 void parse_environment(Parser & p, ostream & os, bool outer,
720 Context & parent_context)
723 string const name = p.getArg('{', '}');
724 const bool is_starred = suffixIs(name, '*');
725 string const unstarred_name = rtrim(name, "*");
726 active_environments.push_back(name);
728 if (is_math_env(name)) {
729 parent_context.check_layout(os);
730 begin_inset(os, "Formula ");
731 os << "\\begin{" << name << "}";
732 parse_math(p, os, FLAG_END, MATH_MODE);
733 os << "\\end{" << name << "}";
737 else if (name == "tabular" || name == "longtable") {
738 eat_whitespace(p, os, parent_context, false);
739 parent_context.check_layout(os);
740 begin_inset(os, "Tabular ");
741 handle_tabular(p, os, name == "longtable", parent_context);
// Float environments (figure, table, ...) declared by the text class.
746 else if (parent_context.textclass.floats().typeExist(unstarred_name)) {
747 eat_whitespace(p, os, parent_context, false);
748 parent_context.check_layout(os);
749 begin_inset(os, "Float " + unstarred_name + "\n");
750 if (p.next_token().asInput() == "[") {
751 os << "placement " << p.getArg('[', ']') << '\n';
753 os << "wide " << convert<string>(is_starred)
754 << "\nsideways false"
755 << "\nstatus open\n\n";
756 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
758 // We don't need really a new paragraph, but
759 // we must make sure that the next item gets a \begin_layout.
760 parent_context.new_paragraph(os);
764 else if (name == "minipage") {
765 eat_whitespace(p, os, parent_context, false);
766 parse_box(p, os, FLAG_END, outer, parent_context, false);
770 else if (name == "comment") {
771 eat_whitespace(p, os, parent_context, false);
772 parent_context.check_layout(os);
773 begin_inset(os, "Note Comment\n");
774 os << "status open\n";
775 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
780 else if (name == "lyxgreyedout") {
781 eat_whitespace(p, os, parent_context, false);
782 parent_context.check_layout(os);
783 begin_inset(os, "Note Greyedout\n");
784 os << "status open\n";
785 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
790 else if (name == "framed") {
791 eat_whitespace(p, os, parent_context, false);
792 parent_context.check_layout(os);
793 begin_inset(os, "Note Framed\n");
794 os << "status open\n";
795 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
800 else if (name == "shaded") {
801 eat_whitespace(p, os, parent_context, false);
802 parent_context.check_layout(os);
803 begin_inset(os, "Note Shaded\n");
804 os << "status open\n";
805 parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
810 else if (!parent_context.new_layout_allowed)
811 parse_unknown_environment(p, name, os, FLAG_END, outer,
814 // Alignment settings
815 else if (name == "center" || name == "flushleft" || name == "flushright" ||
816 name == "centering" || name == "raggedright" || name == "raggedleft") {
817 eat_whitespace(p, os, parent_context, false);
818 // We must begin a new paragraph if not already done
819 if (! parent_context.atParagraphStart()) {
820 parent_context.check_end_layout(os);
821 parent_context.new_paragraph(os);
823 if (name == "flushleft" || name == "raggedright")
824 parent_context.add_extra_stuff("\\align left\n");
825 else if (name == "flushright" || name == "raggedleft")
826 parent_context.add_extra_stuff("\\align right\n");
828 parent_context.add_extra_stuff("\\align center\n");
829 parse_text(p, os, FLAG_END, outer, parent_context);
830 // Just in case the environment is empty ..
831 parent_context.extra_stuff.erase();
832 // We must begin a new paragraph to reset the alignment
833 parent_context.new_paragraph(os);
837 // The single '=' is meant here.
838 else if ((newlayout = findLayout(parent_context.textclass, name)).get() &&
839 newlayout->isEnvironment()) {
840 eat_whitespace(p, os, parent_context, false);
841 Context context(true, parent_context.textclass, newlayout,
842 parent_context.layout, parent_context.font);
843 if (parent_context.deeper_paragraph) {
844 // We are beginning a nested environment after a
845 // deeper paragraph inside the outer list environment.
846 // Therefore we don't need to output a "begin deeper".
847 context.need_end_deeper = true;
849 parent_context.check_end_layout(os);
850 switch (context.layout->latextype) {
851 case LATEX_LIST_ENVIRONMENT:
852 context.extra_stuff = "\\labelwidthstring "
853 + p.verbatim_item() + '\n';
856 case LATEX_BIB_ENVIRONMENT:
857 p.verbatim_item(); // swallow next arg
863 context.check_deeper(os);
864 parse_text(p, os, FLAG_END, outer, context);
865 context.check_end_layout(os);
866 if (parent_context.deeper_paragraph) {
867 // We must suppress the "end deeper" because we
868 // suppressed the "begin deeper" above.
869 context.need_end_deeper = false;
871 context.check_end_deeper(os);
872 parent_context.new_paragraph(os);
876 else if (name == "appendix") {
877 // This is no good latex style, but it works and is used in some documents...
878 eat_whitespace(p, os, parent_context, false);
879 parent_context.check_end_layout(os);
880 Context context(true, parent_context.textclass, parent_context.layout,
881 parent_context.layout, parent_context.font);
882 context.check_layout(os);
883 os << "\\start_of_appendix\n";
884 parse_text(p, os, FLAG_END, outer, context);
885 context.check_end_layout(os);
889 else if (known_environments.find(name) != known_environments.end()) {
890 vector<ArgumentType> arguments = known_environments[name];
891 // The last "argument" denotes whether we may translate the
892 // environment contents to LyX
893 // The default required if no argument is given makes us
894 // compatible with the reLyXre environment.
895 ArgumentType contents = arguments.empty() ?
898 if (!arguments.empty())
899 arguments.pop_back();
900 // See comment in parse_unknown_environment()
901 bool const specialfont =
902 (parent_context.font != parent_context.normalfont);
903 bool const new_layout_allowed =
904 parent_context.new_layout_allowed;
906 parent_context.new_layout_allowed = false;
907 parse_arguments("\\begin{" + name + "}", arguments, p, os,
908 outer, parent_context);
909 if (contents == verbatim)
910 handle_ert(os, p.verbatimEnvironment(name),
913 parse_text_snippet(p, os, FLAG_END, outer,
915 handle_ert(os, "\\end{" + name + "}", parent_context);
917 parent_context.new_layout_allowed = new_layout_allowed;
921 parse_unknown_environment(p, name, os, FLAG_END, outer,
924 active_environments.pop_back();
928 /// parses a comment and outputs it to \p os.
929 void parse_comment(Parser & p, ostream & os, Token const & t, Context & context)
931 BOOST_ASSERT(t.cat() == catComment);
932 if (!t.cs().empty()) {
933 context.check_layout(os);
934 handle_comment(os, '%' + t.cs(), context);
935 if (p.next_token().cat() == catNewline) {
936 // A newline after a comment line starts a new
938 if (context.new_layout_allowed) {
939 if(!context.atParagraphStart())
940 // Only start a new paragraph if not already
941 // done (we might get called recursively)
942 context.new_paragraph(os);
// Layout changes are forbidden here, so encode the
// paragraph break as ERT instead.
944 handle_ert(os, "\n", context);
945 eat_whitespace(p, os, context, true);
955 * Reads spaces and comments until the first non-space, non-comment token.
956 * New paragraphs (double newlines or \\par) are handled like simple spaces
957 * if \p eatParagraph is true.
958 * Spaces are skipped, but comments are written to \p os.
960 void eat_whitespace(Parser & p, ostream & os, Context & context,
964 Token const & t = p.get_token();
965 if (t.cat() == catComment)
966 parse_comment(p, os, t, context);
// Stop at a paragraph break (unless eating paragraphs) or at any
// token that is neither a space nor a newline.
967 else if ((! eatParagraph && p.isParagraph()) ||
968 (t.cat() != catSpace && t.cat() != catNewline)) {
977 * Set a font attribute, parse text and reset the font attribute.
978 * \param attribute Attribute name (e.g. \\family, \\shape etc.)
979 * \param currentvalue Current value of the attribute. Is set to the new
980 * value during parsing.
981 * \param newvalue New value of the attribute
983 void parse_text_attributes(Parser & p, ostream & os, unsigned flags, bool outer,
984 Context & context, string const & attribute,
985 string & currentvalue, string const & newvalue)
987 context.check_layout(os);
988 string const oldvalue = currentvalue;
989 currentvalue = newvalue;
990 os << '\n' << attribute << ' ' << newvalue << "\n";
991 parse_text_snippet(p, os, flags, outer, context);
992 context.check_layout(os);
// Restore the previous attribute value both in the output stream
// and in the caller-visible state.
993 os << '\n' << attribute << ' ' << oldvalue << "\n";
994 currentvalue = oldvalue;
998 /// get the arguments of a natbib or jurabib citation command
999 std::pair<string, string> getCiteArguments(Parser & p, bool natbibOrder)
1001 // We need to distinguish "" and "[]", so we can't use p.getOpt().
1003 // text before the citation
1005 // text after the citation
1006 string after = p.getFullOpt();
1008 if (!after.empty()) {
1009 before = p.getFullOpt();
// With one optional arg, natbib interprets it as the "after" text;
// with two, the order is [before][after] — hence the swap.
1010 if (natbibOrder && !before.empty())
1011 std::swap(before, after);
1013 return std::make_pair(before, after);
1017 /// Convert filenames with TeX macros and/or quotes to something LyX can understand
1018 string const normalize_filename(string const & name)
// Strip surrounding quotes, then re-tokenize to expand filename macros.
1020 Parser p(trim(name, "\""));
1023 Token const & t = p.get_token();
1024 if (t.cat() != catEscape)
1026 else if (t.cs() == "lyxdot") {
1027 // This is used by LyX for simple dots in relative
1031 } else if (t.cs() == "space") {
1041 /// Convert \p name from TeX convention (relative to master file) to LyX
1042 /// convention (relative to .lyx file) if it is relative
1043 void fix_relative_filename(string & name)
// Absolute paths need no rebasing.
1045 if (lyx::support::absolutePath(name))
1047 // FIXME UNICODE encoding of name may be wrong (makeAbsPath expects
1049 name = to_utf8(makeRelPath(from_utf8(makeAbsPath(name, getMasterFilePath()).absFilename()),
1050 from_utf8(getParentFilePath())));
1054 /// Parse a NoWeb Scrap section. The initial "<<" is already parsed.
1055 void parse_noweb(Parser & p, ostream & os, Context & context)
1057 // assemble the rest of the keyword
1061 Token const & t = p.get_token();
// A chunk name is terminated by ">>"; it is a scrap only if "=" follows.
1062 if (t.asInput() == ">" && p.next_token().asInput() == ">") {
1065 scrap = (p.good() && p.next_token().asInput() == "=");
1067 name += p.get_token().asInput();
1070 name += t.asInput();
1073 if (!scrap || !context.new_layout_allowed ||
1074 !context.textclass.hasLayout(from_ascii("Scrap"))) {
1075 cerr << "Warning: Could not interpret '" << name
1076 << "'. Ignoring it." << endl;
1080 // We use new_paragraph instead of check_end_layout because the stuff
1081 // following the noweb chunk needs to start with a \begin_layout.
1082 // This may create a new paragraph even if there was none in the
1083 // noweb file, but the alternative is an invalid LyX file. Since
1084 // noweb code chunks are implemented with a layout style in LyX they
1085 // always must be in an own paragraph.
1086 context.new_paragraph(os)
1087 Context newcontext(true, context.textclass,
1088 context.textclass[from_ascii("Scrap")]);
1089 newcontext.check_layout(os);
1092 Token const & t = p.get_token();
1093 // We abuse the parser a bit, because this is no TeX syntax
1095 if (t.cat() == catEscape)
1096 os << subst(t.asInput(), "\\", "\n\\backslash\n");
1098 os << subst(t.asInput(), "\n", "\n\\newline\n");
1099 // The scrap chunk is ended by an @ at the beginning of a line.
1100 // After the @ the line may contain a comment and/or
1101 // whitespace, but nothing else.
1102 if (t.asInput() == "@" && p.prev_token().cat() == catNewline &&
1103 (p.next_token().cat() == catSpace ||
1104 p.next_token().cat() == catNewline ||
1105 p.next_token().cat() == catComment)) {
1106 while (p.good() && p.next_token().cat() == catSpace)
1107 os << p.get_token().asInput();
1108 if (p.next_token().cat() == catComment)
1109 // The comment includes a final '\n'
1110 os << p.get_token().asInput();
1112 if (p.next_token().cat() == catNewline)
1119 newcontext.check_end_layout(os);
1122 } // anonymous namespace
1125 void parse_text(Parser & p, ostream & os, unsigned flags, bool outer,
1128 LayoutPtr newlayout;
1129 // Store the latest bibliographystyle (needed for bibtex inset)
1130 string bibliographystyle;
1131 bool const use_natbib = used_packages.find("natbib") != used_packages.end();
1132 bool const use_jurabib = used_packages.find("jurabib") != used_packages.end();
1134 Token const & t = p.get_token();
1137 cerr << "t: " << t << " flags: " << flags << "\n";
1140 if (flags & FLAG_ITEM) {
1141 if (t.cat() == catSpace)
1144 flags &= ~FLAG_ITEM;
1145 if (t.cat() == catBegin) {
1146 // skip the brace and collect everything to the next matching
1148 flags |= FLAG_BRACE_LAST;
1152 // handle only this single token, leave the loop if done
1153 flags |= FLAG_LEAVE;
1156 if (t.character() == ']' && (flags & FLAG_BRACK_LAST))
1162 if (t.cat() == catMath) {
1163 // we are inside some text mode thingy, so opening new math is allowed
1164 context.check_layout(os);
1165 begin_inset(os, "Formula ");
1166 Token const & n = p.get_token();
1167 if (n.cat() == catMath && outer) {
1168 // TeX's $$...$$ syntax for displayed math
1170 parse_math(p, os, FLAG_SIMPLE, MATH_MODE);
1172 p.get_token(); // skip the second '$' token
1174 // simple $...$ stuff
1177 parse_math(p, os, FLAG_SIMPLE, MATH_MODE);
1183 else if (t.cat() == catSuper || t.cat() == catSub)
1184 cerr << "catcode " << t << " illegal in text mode\n";
1186 // Basic support for english quotes. This should be
1187 // extended to other quotes, but is not so easy (a
1188 // left english quote is the same as a right german
1190 else if (t.asInput() == "`"
1191 && p.next_token().asInput() == "`") {
1192 context.check_layout(os);
1193 begin_inset(os, "Quotes ");
1199 else if (t.asInput() == "'"
1200 && p.next_token().asInput() == "'") {
1201 context.check_layout(os);
1202 begin_inset(os, "Quotes ");
1209 else if (t.asInput() == "<"
1210 && p.next_token().asInput() == "<" && noweb_mode) {
1212 parse_noweb(p, os, context);
1215 else if (t.cat() == catSpace || (t.cat() == catNewline && ! p.isParagraph()))
1216 check_space(p, os, context);
1218 else if (t.character() == '[' && noweb_mode &&
1219 p.next_token().character() == '[') {
1220 // These can contain underscores
1222 string const s = p.getFullOpt() + ']';
1223 if (p.next_token().character() == ']')
1226 cerr << "Warning: Inserting missing ']' in '"
1227 << s << "'." << endl;
1228 handle_ert(os, s, context);
1231 else if (t.cat() == catLetter ||
1232 t.cat() == catOther ||
1233 t.cat() == catAlign ||
1234 t.cat() == catParameter) {
1235 // This translates "&" to "\\&" which may be wrong...
1236 context.check_layout(os);
1237 os << t.character();
1240 else if (p.isParagraph()) {
1241 if (context.new_layout_allowed)
1242 context.new_paragraph(os);
1244 handle_ert(os, "\\par ", context);
1245 eat_whitespace(p, os, context, true);
1248 else if (t.cat() == catActive) {
1249 context.check_layout(os);
1250 if (t.character() == '~') {
1251 if (context.layout->free_spacing)
1254 os << "\\InsetSpace ~\n";
1256 os << t.character();
1259 else if (t.cat() == catBegin &&
1260 p.next_token().cat() == catEnd) {
1262 Token const prev = p.prev_token();
1264 if (p.next_token().character() == '`' ||
1265 (prev.character() == '-' &&
1266 p.next_token().character() == '-'))
1267 ; // ignore it in {}`` or -{}-
1269 handle_ert(os, "{}", context);
1273 else if (t.cat() == catBegin) {
1274 context.check_layout(os);
1275 // special handling of font attribute changes
1276 Token const prev = p.prev_token();
1277 Token const next = p.next_token();
1278 TeXFont const oldFont = context.font;
1279 if (next.character() == '[' ||
1280 next.character() == ']' ||
1281 next.character() == '*') {
1283 if (p.next_token().cat() == catEnd) {
1284 os << next.character();
1288 handle_ert(os, "{", context);
1289 parse_text_snippet(p, os,
1292 handle_ert(os, "}", context);
1294 } else if (! context.new_layout_allowed) {
1295 handle_ert(os, "{", context);
1296 parse_text_snippet(p, os, FLAG_BRACE_LAST,
1298 handle_ert(os, "}", context);
1299 } else if (is_known(next.cs(), known_sizes)) {
1300 // next will change the size, so we must
1302 parse_text_snippet(p, os, FLAG_BRACE_LAST,
1304 if (!context.atParagraphStart())
1306 << context.font.size << "\n";
1307 } else if (is_known(next.cs(), known_font_families)) {
1308 // next will change the font family, so we
1309 // must reset it here
1310 parse_text_snippet(p, os, FLAG_BRACE_LAST,
1312 if (!context.atParagraphStart())
1314 << context.font.family << "\n";
1315 } else if (is_known(next.cs(), known_font_series)) {
1316 // next will change the font series, so we
1317 // must reset it here
1318 parse_text_snippet(p, os, FLAG_BRACE_LAST,
1320 if (!context.atParagraphStart())
1322 << context.font.series << "\n";
1323 } else if (is_known(next.cs(), known_font_shapes)) {
1324 // next will change the font shape, so we
1325 // must reset it here
1326 parse_text_snippet(p, os, FLAG_BRACE_LAST,
1328 if (!context.atParagraphStart())
1330 << context.font.shape << "\n";
1331 } else if (is_known(next.cs(), known_old_font_families) ||
1332 is_known(next.cs(), known_old_font_series) ||
1333 is_known(next.cs(), known_old_font_shapes)) {
1334 // next will change the font family, series
1335 // and shape, so we must reset it here
1336 parse_text_snippet(p, os, FLAG_BRACE_LAST,
1338 if (!context.atParagraphStart())
1340 << context.font.family
1342 << context.font.series
1344 << context.font.shape << "\n";
1346 handle_ert(os, "{", context);
1347 parse_text_snippet(p, os, FLAG_BRACE_LAST,
1349 handle_ert(os, "}", context);
1353 else if (t.cat() == catEnd) {
1354 if (flags & FLAG_BRACE_LAST) {
1357 cerr << "stray '}' in text\n";
1358 handle_ert(os, "}", context);
1361 else if (t.cat() == catComment)
1362 parse_comment(p, os, t, context);
1365 // control sequences
1368 else if (t.cs() == "(") {
1369 context.check_layout(os);
1370 begin_inset(os, "Formula");
1372 parse_math(p, os, FLAG_SIMPLE2, MATH_MODE);
1377 else if (t.cs() == "[") {
1378 context.check_layout(os);
1379 begin_inset(os, "Formula");
1381 parse_math(p, os, FLAG_EQUATION, MATH_MODE);
1386 else if (t.cs() == "begin")
1387 parse_environment(p, os, outer, context);
1389 else if (t.cs() == "end") {
1390 if (flags & FLAG_END) {
1391 // eat environment name
1392 string const name = p.getArg('{', '}');
1393 if (name != active_environment())
1394 cerr << "\\end{" + name + "} does not match \\begin{"
1395 + active_environment() + "}\n";
1398 p.error("found 'end' unexpectedly");
1401 else if (t.cs() == "item") {
1404 bool optarg = false;
1405 if (p.next_token().character() == '[') {
1406 p.get_token(); // eat '['
1407 s = parse_text_snippet(p, FLAG_BRACK_LAST,
1412 context.check_layout(os);
1413 if (context.has_item) {
1414 // An item in an unknown list-like environment
1415 // FIXME: Do this in check_layout()!
1416 context.has_item = false;
1418 handle_ert(os, "\\item", context);
1420 handle_ert(os, "\\item ", context);
1423 if (context.layout->labeltype != LABEL_MANUAL) {
1424 // lyx does not support \item[\mybullet]
1425 // in itemize environments
1426 handle_ert(os, "[", context);
1428 handle_ert(os, "]", context);
1429 } else if (!s.empty()) {
1430 // The space is needed to separate the
1431 // item from the rest of the sentence.
1433 eat_whitespace(p, os, context, false);
1438 else if (t.cs() == "bibitem") {
1440 context.check_layout(os);
1443 os << '{' << p.verbatim_item() << '}' << "\n";
1446 else if (t.cs() == "def") {
1447 context.check_layout(os);
1448 eat_whitespace(p, os, context, false);
1449 string name = p.get_token().cs();
1450 eat_whitespace(p, os, context, false);
1456 while (p.next_token().cat() != catBegin) {
1457 if (p.next_token().cat() == catParameter) {
1462 // followed by number?
1463 if (p.next_token().cat() == catOther) {
1464 char c = p.getChar();
1466 // number = current arity + 1?
1467 if (c == arity + '0' + 1)
1472 paramtext += p.get_token().asString();
1474 paramtext += p.get_token().asString();
1479 // only output simple (i.e. compatible) macro as FormulaMacros
1480 string ert = "\\def\\" + name + ' ' + paramtext + '{' + p.verbatim_item() + '}';
1482 context.check_layout(os);
1483 begin_inset(os, "FormulaMacro");
1487 handle_ert(os, ert, context);
1490 else if (t.cs() == "noindent") {
1492 context.add_extra_stuff("\\noindent\n");
1495 else if (t.cs() == "appendix") {
1496 context.add_extra_stuff("\\start_of_appendix\n");
1497 // We need to start a new paragraph. Otherwise the
1498 // appendix in 'bla\appendix\chapter{' would start
1500 context.new_paragraph(os);
1501 // We need to make sure that the paragraph is
1502 // generated even if it is empty. Otherwise the
1503 // appendix in '\par\appendix\par\chapter{' would
1505 context.check_layout(os);
1506 // FIXME: This is a hack to prevent paragraph
1507 // deletion if it is empty. Handle this better!
1509 "%dummy comment inserted by tex2lyx to "
1510 "ensure that this paragraph is not empty",
1512 // Both measures above may generate an additional
1513 // empty paragraph, but that does not hurt, because
1514 // whitespace does not matter here.
1515 eat_whitespace(p, os, context, true);
1518 // Must attempt to parse "Section*" before "Section".
1519 else if ((p.next_token().asInput() == "*") &&
1520 context.new_layout_allowed &&
1521 // The single '=' is meant here.
1522 (newlayout = findLayout(context.textclass,
1523 t.cs() + '*')).get() &&
1524 newlayout->isCommand()) {
1526 output_command_layout(os, p, outer, context, newlayout);
1530 // The single '=' is meant here.
1531 else if (context.new_layout_allowed &&
1532 (newlayout = findLayout(context.textclass, t.cs())).get() &&
1533 newlayout->isCommand()) {
1534 output_command_layout(os, p, outer, context, newlayout);
1538 // Special handling for \caption
1539 // FIXME: remove this when InsetCaption is supported.
1540 else if (context.new_layout_allowed &&
1541 t.cs() == captionlayout->latexname()) {
1542 output_command_layout(os, p, outer, context,
1547 else if (t.cs() == "includegraphics") {
1548 bool const clip = p.next_token().asInput() == "*";
1551 map<string, string> opts = split_map(p.getArg('[', ']'));
1553 opts["clip"] = string();
1554 string name = normalize_filename(p.verbatim_item());
1556 string const path = getMasterFilePath();
1557 // We want to preserve relative / absolute filenames,
1558 // therefore path is only used for testing
1559 // FIXME UNICODE encoding of name and path may be
1560 // wrong (makeAbsPath expects utf8)
1561 if (!makeAbsPath(name, path).exists()) {
1562 // The file extension is probably missing.
1563 // Now try to find it out.
1564 string const dvips_name =
1565 find_file(name, path,
1566 known_dvips_graphics_formats);
1567 string const pdftex_name =
1568 find_file(name, path,
1569 known_pdftex_graphics_formats);
1570 if (!dvips_name.empty()) {
1571 if (!pdftex_name.empty()) {
1572 cerr << "This file contains the "
1574 "\"\\includegraphics{"
1576 "However, files\n\""
1577 << dvips_name << "\" and\n\""
1578 << pdftex_name << "\"\n"
1579 "both exist, so I had to make a "
1580 "choice and took the first one.\n"
1581 "Please move the unwanted one "
1582 "someplace else and try again\n"
1583 "if my choice was wrong."
1587 } else if (!pdftex_name.empty())
1591 // FIXME UNICODE encoding of name and path may be
1592 // wrong (makeAbsPath expects utf8)
1593 if (makeAbsPath(name, path).exists())
1594 fix_relative_filename(name);
1596 cerr << "Warning: Could not find graphics file '"
1597 << name << "'." << endl;
1599 context.check_layout(os);
1600 begin_inset(os, "Graphics ");
1601 os << "\n\tfilename " << name << '\n';
1602 if (opts.find("width") != opts.end())
1604 << translate_len(opts["width"]) << '\n';
1605 if (opts.find("height") != opts.end())
1607 << translate_len(opts["height"]) << '\n';
1608 if (opts.find("scale") != opts.end()) {
1609 istringstream iss(opts["scale"]);
1613 os << "\tscale " << val << '\n';
1615 if (opts.find("angle") != opts.end())
1616 os << "\trotateAngle "
1617 << opts["angle"] << '\n';
1618 if (opts.find("origin") != opts.end()) {
1620 string const opt = opts["origin"];
1621 if (opt.find('l') != string::npos) ss << "left";
1622 if (opt.find('r') != string::npos) ss << "right";
1623 if (opt.find('c') != string::npos) ss << "center";
1624 if (opt.find('t') != string::npos) ss << "Top";
1625 if (opt.find('b') != string::npos) ss << "Bottom";
1626 if (opt.find('B') != string::npos) ss << "Baseline";
1627 if (!ss.str().empty())
1628 os << "\trotateOrigin " << ss.str() << '\n';
1630 cerr << "Warning: Ignoring unknown includegraphics origin argument '" << opt << "'\n";
1632 if (opts.find("keepaspectratio") != opts.end())
1633 os << "\tkeepAspectRatio\n";
1634 if (opts.find("clip") != opts.end())
1636 if (opts.find("draft") != opts.end())
1638 if (opts.find("bb") != opts.end())
1639 os << "\tBoundingBox "
1640 << opts["bb"] << '\n';
1641 int numberOfbbOptions = 0;
1642 if (opts.find("bbllx") != opts.end())
1643 numberOfbbOptions++;
1644 if (opts.find("bblly") != opts.end())
1645 numberOfbbOptions++;
1646 if (opts.find("bburx") != opts.end())
1647 numberOfbbOptions++;
1648 if (opts.find("bbury") != opts.end())
1649 numberOfbbOptions++;
1650 if (numberOfbbOptions == 4)
1651 os << "\tBoundingBox "
1652 << opts["bbllx"] << opts["bblly"]
1653 << opts["bburx"] << opts["bbury"] << '\n';
1654 else if (numberOfbbOptions > 0)
1655 cerr << "Warning: Ignoring incomplete includegraphics boundingbox arguments.\n";
1656 numberOfbbOptions = 0;
1657 if (opts.find("natwidth") != opts.end())
1658 numberOfbbOptions++;
1659 if (opts.find("natheight") != opts.end())
1660 numberOfbbOptions++;
1661 if (numberOfbbOptions == 2)
1662 os << "\tBoundingBox 0bp 0bp "
1663 << opts["natwidth"] << opts["natheight"] << '\n';
1664 else if (numberOfbbOptions > 0)
1665 cerr << "Warning: Ignoring incomplete includegraphics boundingbox arguments.\n";
1666 ostringstream special;
1667 if (opts.find("hiresbb") != opts.end())
1668 special << "hiresbb,";
1669 if (opts.find("trim") != opts.end())
1671 if (opts.find("viewport") != opts.end())
1672 special << "viewport=" << opts["viewport"] << ',';
1673 if (opts.find("totalheight") != opts.end())
1674 special << "totalheight=" << opts["totalheight"] << ',';
1675 if (opts.find("type") != opts.end())
1676 special << "type=" << opts["type"] << ',';
1677 if (opts.find("ext") != opts.end())
1678 special << "ext=" << opts["ext"] << ',';
1679 if (opts.find("read") != opts.end())
1680 special << "read=" << opts["read"] << ',';
1681 if (opts.find("command") != opts.end())
1682 special << "command=" << opts["command"] << ',';
1683 string s_special = special.str();
1684 if (!s_special.empty()) {
1685 // We had special arguments. Remove the trailing ','.
1686 os << "\tspecial " << s_special.substr(0, s_special.size() - 1) << '\n';
1688 // TODO: Handle the unknown settings better.
1689 // Warn about invalid options.
1690 // Check whether some option was given twice.
1694 else if (t.cs() == "footnote" ||
1695 (t.cs() == "thanks" && context.layout->intitle)) {
1697 context.check_layout(os);
1698 begin_inset(os, "Foot\n");
1699 os << "status collapsed\n\n";
1700 parse_text_in_inset(p, os, FLAG_ITEM, false, context);
1704 else if (t.cs() == "marginpar") {
1706 context.check_layout(os);
1707 begin_inset(os, "Marginal\n");
1708 os << "status collapsed\n\n";
1709 parse_text_in_inset(p, os, FLAG_ITEM, false, context);
1713 else if (t.cs() == "ensuremath") {
1715 context.check_layout(os);
1716 string const s = p.verbatim_item();
1717 if (s == "\xb1" || s == "\xb3" || s == "\xb2" || s == "\xb5")
1720 handle_ert(os, "\\ensuremath{" + s + "}",
1724 else if (t.cs() == "hfill") {
1725 context.check_layout(os);
1726 os << "\n\\hfill\n";
1731 else if (t.cs() == "makeindex" || t.cs() == "maketitle") {
1732 // FIXME: Somehow prevent title layouts if
1733 // "maketitle" was not found
1735 skip_braces(p); // swallow this
1738 else if (t.cs() == "tableofcontents") {
1740 context.check_layout(os);
1741 begin_inset(os, "LatexCommand \\tableofcontents\n");
1743 skip_braces(p); // swallow this
1746 else if (t.cs() == "listoffigures") {
1748 context.check_layout(os);
1749 begin_inset(os, "FloatList figure\n");
1751 skip_braces(p); // swallow this
1754 else if (t.cs() == "listoftables") {
1756 context.check_layout(os);
1757 begin_inset(os, "FloatList table\n");
1759 skip_braces(p); // swallow this
1762 else if (t.cs() == "listof") {
1763 p.skip_spaces(true);
1764 string const name = p.get_token().asString();
1765 if (context.textclass.floats().typeExist(name)) {
1766 context.check_layout(os);
1767 begin_inset(os, "FloatList ");
1770 p.get_token(); // swallow second arg
1772 handle_ert(os, "\\listof{" + name + "}", context);
1775 else if (t.cs() == "textrm")
1776 parse_text_attributes(p, os, FLAG_ITEM, outer,
1777 context, "\\family",
1778 context.font.family, "roman");
1780 else if (t.cs() == "textsf")
1781 parse_text_attributes(p, os, FLAG_ITEM, outer,
1782 context, "\\family",
1783 context.font.family, "sans");
1785 else if (t.cs() == "texttt")
1786 parse_text_attributes(p, os, FLAG_ITEM, outer,
1787 context, "\\family",
1788 context.font.family, "typewriter");
1790 else if (t.cs() == "textmd")
1791 parse_text_attributes(p, os, FLAG_ITEM, outer,
1792 context, "\\series",
1793 context.font.series, "medium");
1795 else if (t.cs() == "textbf")
1796 parse_text_attributes(p, os, FLAG_ITEM, outer,
1797 context, "\\series",
1798 context.font.series, "bold");
1800 else if (t.cs() == "textup")
1801 parse_text_attributes(p, os, FLAG_ITEM, outer,
1803 context.font.shape, "up");
1805 else if (t.cs() == "textit")
1806 parse_text_attributes(p, os, FLAG_ITEM, outer,
1808 context.font.shape, "italic");
1810 else if (t.cs() == "textsl")
1811 parse_text_attributes(p, os, FLAG_ITEM, outer,
1813 context.font.shape, "slanted");
1815 else if (t.cs() == "textsc")
1816 parse_text_attributes(p, os, FLAG_ITEM, outer,
1818 context.font.shape, "smallcaps");
1820 else if (t.cs() == "textnormal" || t.cs() == "normalfont") {
1821 context.check_layout(os);
1822 TeXFont oldFont = context.font;
1823 context.font.init();
1824 context.font.size = oldFont.size;
1825 os << "\n\\family " << context.font.family << "\n";
1826 os << "\n\\series " << context.font.series << "\n";
1827 os << "\n\\shape " << context.font.shape << "\n";
1828 if (t.cs() == "textnormal") {
1829 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
1830 output_font_change(os, context.font, oldFont);
1831 context.font = oldFont;
1833 eat_whitespace(p, os, context, false);
1836 else if (t.cs() == "underbar") {
1837 // Do NOT handle \underline.
1838 // \underbar cuts through y, g, q, p etc.,
1839 // \underline does not.
1840 context.check_layout(os);
1841 os << "\n\\bar under\n";
1842 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
1843 context.check_layout(os);
1844 os << "\n\\bar default\n";
1847 else if (t.cs() == "emph" || t.cs() == "noun") {
1848 context.check_layout(os);
1849 os << "\n\\" << t.cs() << " on\n";
1850 parse_text_snippet(p, os, FLAG_ITEM, outer, context);
1851 context.check_layout(os);
1852 os << "\n\\" << t.cs() << " default\n";
1855 else if (use_natbib &&
1856 is_known(t.cs(), known_natbib_commands) &&
1857 ((t.cs() != "citefullauthor" &&
1858 t.cs() != "citeyear" &&
1859 t.cs() != "citeyearpar") ||
1860 p.next_token().asInput() != "*")) {
1861 context.check_layout(os);
1863 // \citet[before][after]{a} \citet[after][before]{a}
1864 // \citet[before][]{a} \citet[][before]{a}
1865 // \citet[after]{a} \citet[after]{a}
1866 // \citet{a} \citet{a}
1867 string command = '\\' + t.cs();
1868 if (p.next_token().asInput() == "*") {
1872 if (command == "\\citefullauthor")
1873 // alternative name for "\\citeauthor*"
1874 command = "\\citeauthor*";
1876 // text before the citation
1878 // text after the citation
1881 boost::tie(before, after) = getCiteArguments(p, true);
1882 if (command == "\\cite") {
1883 // \cite without optional argument means
1884 // \citet, \cite with at least one optional
1885 // argument means \citep.
1886 if (before.empty() && after.empty())
1887 command = "\\citet";
1889 command = "\\citep";
1891 if (before.empty() && after == "[]")
1892 // avoid \citet[]{a}
1894 else if (before == "[]" && after == "[]") {
1895 // avoid \citet[][]{a}
1899 begin_inset(os, "LatexCommand ");
1900 os << command << after << before
1901 << '{' << p.verbatim_item() << "}\n";
1905 else if (use_jurabib &&
1906 is_known(t.cs(), known_jurabib_commands)) {
1907 context.check_layout(os);
1908 string const command = '\\' + t.cs();
1909 char argumentOrder = '\0';
1910 vector<string> const & options = used_packages["jurabib"];
1911 if (std::find(options.begin(), options.end(),
1912 "natbiborder") != options.end())
1913 argumentOrder = 'n';
1914 else if (std::find(options.begin(), options.end(),
1915 "jurabiborder") != options.end())
1916 argumentOrder = 'j';
1918 // text before the citation
1920 // text after the citation
1923 boost::tie(before, after) =
1924 getCiteArguments(p, argumentOrder != 'j');
1925 string const citation = p.verbatim_item();
1926 if (!before.empty() && argumentOrder == '\0') {
1927 cerr << "Warning: Assuming argument order "
1928 "of jurabib version 0.6 for\n'"
1929 << command << before << after << '{'
1930 << citation << "}'.\n"
1931 "Add 'jurabiborder' to the jurabib "
1932 "package options if you used an\n"
1933 "earlier jurabib version." << endl;
1935 begin_inset(os, "LatexCommand ");
1936 os << command << after << before
1937 << '{' << citation << "}\n";
1941 else if (is_known(t.cs(), known_latex_commands)) {
1942 // This needs to be after the check for natbib and
1943 // jurabib commands, because "cite" has different
1944 // arguments with natbib and jurabib.
1945 context.check_layout(os);
1946 begin_inset(os, "LatexCommand ");
1947 os << '\\' << t.cs();
1948 // lyx cannot handle newlines in a latex command
1949 // FIXME: Move the substitution into parser::getOpt()?
1950 os << subst(p.getOpt(), "\n", " ");
1951 os << subst(p.getOpt(), "\n", " ");
1952 os << '{' << subst(p.verbatim_item(), "\n", " ") << "}\n";
1956 else if (is_known(t.cs(), known_quotes)) {
1957 char const * const * where = is_known(t.cs(), known_quotes);
1958 context.check_layout(os);
1959 begin_inset(os, "Quotes ");
1960 os << known_coded_quotes[where - known_quotes];
1962 // LyX adds {} after the quote, so we have to eat
1963 // spaces here if there are any before a possible
1965 eat_whitespace(p, os, context, false);
1969 else if (is_known(t.cs(), known_sizes) &&
1970 context.new_layout_allowed) {
1971 char const * const * where = is_known(t.cs(), known_sizes);
1972 context.check_layout(os);
1973 TeXFont const oldFont = context.font;
1974 context.font.size = known_coded_sizes[where - known_sizes];
1975 output_font_change(os, oldFont, context.font);
1976 eat_whitespace(p, os, context, false);
1979 else if (is_known(t.cs(), known_font_families) &&
1980 context.new_layout_allowed) {
1981 char const * const * where =
1982 is_known(t.cs(), known_font_families);
1983 context.check_layout(os);
1984 TeXFont const oldFont = context.font;
1985 context.font.family =
1986 known_coded_font_families[where - known_font_families];
1987 output_font_change(os, oldFont, context.font);
1988 eat_whitespace(p, os, context, false);
1991 else if (is_known(t.cs(), known_font_series) &&
1992 context.new_layout_allowed) {
1993 char const * const * where =
1994 is_known(t.cs(), known_font_series);
1995 context.check_layout(os);
1996 TeXFont const oldFont = context.font;
1997 context.font.series =
1998 known_coded_font_series[where - known_font_series];
1999 output_font_change(os, oldFont, context.font);
2000 eat_whitespace(p, os, context, false);
2003 else if (is_known(t.cs(), known_font_shapes) &&
2004 context.new_layout_allowed) {
2005 char const * const * where =
2006 is_known(t.cs(), known_font_shapes);
2007 context.check_layout(os);
2008 TeXFont const oldFont = context.font;
2009 context.font.shape =
2010 known_coded_font_shapes[where - known_font_shapes];
2011 output_font_change(os, oldFont, context.font);
2012 eat_whitespace(p, os, context, false);
2014 else if (is_known(t.cs(), known_old_font_families) &&
2015 context.new_layout_allowed) {
2016 char const * const * where =
2017 is_known(t.cs(), known_old_font_families);
2018 context.check_layout(os);
2019 TeXFont const oldFont = context.font;
2020 context.font.init();
2021 context.font.size = oldFont.size;
2022 context.font.family =
2023 known_coded_font_families[where - known_old_font_families];
2024 output_font_change(os, oldFont, context.font);
2025 eat_whitespace(p, os, context, false);
2028 else if (is_known(t.cs(), known_old_font_series) &&
2029 context.new_layout_allowed) {
2030 char const * const * where =
2031 is_known(t.cs(), known_old_font_series);
2032 context.check_layout(os);
2033 TeXFont const oldFont = context.font;
2034 context.font.init();
2035 context.font.size = oldFont.size;
2036 context.font.series =
2037 known_coded_font_series[where - known_old_font_series];
2038 output_font_change(os, oldFont, context.font);
2039 eat_whitespace(p, os, context, false);
2042 else if (is_known(t.cs(), known_old_font_shapes) &&
2043 context.new_layout_allowed) {
2044 char const * const * where =
2045 is_known(t.cs(), known_old_font_shapes);
2046 context.check_layout(os);
2047 TeXFont const oldFont = context.font;
2048 context.font.init();
2049 context.font.size = oldFont.size;
2050 context.font.shape =
2051 known_coded_font_shapes[where - known_old_font_shapes];
2052 output_font_change(os, oldFont, context.font);
2053 eat_whitespace(p, os, context, false);
2056 else if (t.cs() == "LyX" || t.cs() == "TeX"
2057 || t.cs() == "LaTeX") {
2058 context.check_layout(os);
2060 skip_braces(p); // eat {}
2063 else if (t.cs() == "LaTeXe") {
2064 context.check_layout(os);
2066 skip_braces(p); // eat {}
2069 else if (t.cs() == "ldots") {
2070 context.check_layout(os);
2072 os << "\\SpecialChar \\ldots{}\n";
2075 else if (t.cs() == "lyxarrow") {
2076 context.check_layout(os);
2077 os << "\\SpecialChar \\menuseparator\n";
2081 else if (t.cs() == "textcompwordmark") {
2082 context.check_layout(os);
2083 os << "\\SpecialChar \\textcompwordmark{}\n";
2087 else if (t.cs() == "@" && p.next_token().asInput() == ".") {
2088 context.check_layout(os);
2089 os << "\\SpecialChar \\@.\n";
2093 else if (t.cs() == "-") {
2094 context.check_layout(os);
2095 os << "\\SpecialChar \\-\n";
2098 else if (t.cs() == "textasciitilde") {
2099 context.check_layout(os);
2104 else if (t.cs() == "textasciicircum") {
2105 context.check_layout(os);
2110 else if (t.cs() == "textbackslash") {
2111 context.check_layout(os);
2112 os << "\n\\backslash\n";
2116 else if (t.cs() == "_" || t.cs() == "&" || t.cs() == "#"
2117 || t.cs() == "$" || t.cs() == "{" || t.cs() == "}"
2119 context.check_layout(os);
2123 else if (t.cs() == "char") {
2124 context.check_layout(os);
2125 if (p.next_token().character() == '`') {
2127 if (p.next_token().cs() == "\"") {
2132 handle_ert(os, "\\char`", context);
2135 handle_ert(os, "\\char", context);
2139 else if (t.cs() == "verb") {
2140 context.check_layout(os);
2141 char const delimiter = p.next_token().character();
2142 string const arg = p.getArg(delimiter, delimiter);
2144 oss << "\\verb" << delimiter << arg << delimiter;
2145 handle_ert(os, oss.str(), context);
2148 else if (t.cs() == "\"") {
2149 context.check_layout(os);
2150 string const name = p.verbatim_item();
2151 if (name == "a") os << '\xe4';
2152 else if (name == "o") os << '\xf6';
2153 else if (name == "u") os << '\xfc';
2154 else if (name == "A") os << '\xc4';
2155 else if (name == "O") os << '\xd6';
2156 else if (name == "U") os << '\xdc';
2157 else handle_ert(os, "\"{" + name + "}", context);
2160 // Problem: \= creates a tabstop inside the tabbing environment
2161 // and else an accent. In the latter case we really would want
2162 // \={o} instead of \= o.
2163 else if (t.cs() == "=" && (flags & FLAG_TABBING))
2164 handle_ert(os, t.asInput(), context);
2166 else if (t.cs() == "H" || t.cs() == "c" || t.cs() == "^"
2167 || t.cs() == "'" || t.cs() == "`"
2168 || t.cs() == "~" || t.cs() == "." || t.cs() == "=") {
2169 // we need the trim as the LyX parser chokes on such spaces
2170 // The argument of InsetLatexAccent is parsed as a
2171 // subset of LaTeX, so don't parse anything here,
2172 // but use the raw argument.
2173 // Otherwise we would convert \~{\i} wrongly.
2174 // This will of course not translate \~{\ss} to \~{ß},
2175 // but that does at least compile and does only look
2176 // strange on screen.
2177 context.check_layout(os);
2178 os << "\\i \\" << t.cs() << "{"
2179 << trim(p.verbatim_item(), " ")
2183 else if (t.cs() == "ss") {
2184 context.check_layout(os);
2186 skip_braces(p); // eat {}
2189 else if (t.cs() == "i" || t.cs() == "j" || t.cs() == "l" ||
2191 context.check_layout(os);
2192 os << "\\i \\" << t.cs() << "{}\n";
2193 skip_braces(p); // eat {}
2196 else if (t.cs() == "\\") {
2197 context.check_layout(os);
2198 string const next = p.next_token().asInput();
2200 handle_ert(os, "\\\\" + p.getOpt(), context);
2201 else if (next == "*") {
2203 handle_ert(os, "\\\\*" + p.getOpt(), context);
2206 os << "\n\\newline\n";
2210 else if (t.cs() == "newline") {
2211 context.check_layout(os);
2212 os << "\n\\" << t.cs() << "\n";
2213 skip_braces(p); // eat {}
2216 else if (t.cs() == "input" || t.cs() == "include"
2217 || t.cs() == "verbatiminput") {
2218 string name = '\\' + t.cs();
2219 if (t.cs() == "verbatiminput"
2220 && p.next_token().asInput() == "*")
2221 name += p.get_token().asInput();
2222 context.check_layout(os);
2223 begin_inset(os, "Include ");
2224 string filename(normalize_filename(p.getArg('{', '}')));
2225 string const path = getMasterFilePath();
2226 // We want to preserve relative / absolute filenames,
2227 // therefore path is only used for testing
2228 // FIXME UNICODE encoding of filename and path may be
2229 // wrong (makeAbsPath expects utf8)
2230 if ((t.cs() == "include" || t.cs() == "input") &&
2231 !makeAbsPath(filename, path).exists()) {
2232 // The file extension is probably missing.
2233 // Now try to find it out.
2234 string const tex_name =
2235 find_file(filename, path,
2236 known_tex_extensions);
2237 if (!tex_name.empty())
2238 filename = tex_name;
2240 // FIXME UNICODE encoding of filename and path may be
2241 // wrong (makeAbsPath expects utf8)
2242 if (makeAbsPath(filename, path).exists()) {
2243 string const abstexname =
2244 makeAbsPath(filename, path).absFilename();
2245 string const abslyxname =
2246 changeExtension(abstexname, ".lyx");
2247 fix_relative_filename(filename);
2248 string const lyxname =
2249 changeExtension(filename, ".lyx");
2250 if (t.cs() != "verbatiminput" &&
2251 tex2lyx(abstexname, FileName(abslyxname))) {
2252 os << name << '{' << lyxname << "}\n";
2254 os << name << '{' << filename << "}\n";
2257 cerr << "Warning: Could not find included file '"
2258 << filename << "'." << endl;
2259 os << name << '{' << filename << "}\n";
2261 os << "preview false\n";
2265 else if (t.cs() == "bibliographystyle") {
2266 // store new bibliographystyle
2267 bibliographystyle = p.verbatim_item();
2268 // output new bibliographystyle.
2269 // This is only necessary if used in some other macro than \bibliography.
2270 handle_ert(os, "\\bibliographystyle{" + bibliographystyle + "}", context);
2273 else if (t.cs() == "bibliography") {
2274 context.check_layout(os);
2275 begin_inset(os, "LatexCommand ");
2277 // Do we have a bibliographystyle set?
2278 if (!bibliographystyle.empty()) {
2279 os << '[' << bibliographystyle << ']';
2281 os << '{' << p.verbatim_item() << "}\n";
2285 else if (t.cs() == "parbox")
2286 parse_box(p, os, FLAG_ITEM, outer, context, true);
2288 else if (t.cs() == "smallskip" ||
2289 t.cs() == "medskip" ||
2290 t.cs() == "bigskip" ||
2291 t.cs() == "vfill") {
2292 context.check_layout(os);
2293 begin_inset(os, "VSpace ");
2299 else if (is_known(t.cs(), known_spaces)) {
2300 char const * const * where = is_known(t.cs(), known_spaces);
2301 context.check_layout(os);
2302 begin_inset(os, "InsetSpace ");
2303 os << '\\' << known_coded_spaces[where - known_spaces]
2305 // LaTeX swallows whitespace after all spaces except
2306 // "\\,". We have to do that here, too, because LyX
2307 // adds "{}" which would make the spaces significant.
2309 eat_whitespace(p, os, context, false);
2310 // LyX adds "{}" after all spaces except "\\ " and
2311 // "\\,", so we have to remove "{}".
2312 // "\\,{}" is equivalent to "\\," in LaTeX, so we
2313 // remove the braces after "\\,", too.
2318 else if (t.cs() == "newpage" ||
2319 t.cs() == "pagebreak" ||
2320 t.cs() == "clearpage" ||
2321 t.cs() == "cleardoublepage") {
2322 context.check_layout(os);
2323 os << "\n\\" << t.cs() << "\n";
2324 skip_braces(p); // eat {}
2327 else if (t.cs() == "newcommand" ||
2328 t.cs() == "providecommand" ||
2329 t.cs() == "renewcommand") {
2330 // these could be handled by parse_command(), but
2331 // we need to call add_known_command() here.
2332 string name = t.asInput();
2333 if (p.next_token().asInput() == "*") {
2334 // Starred form. Eat '*'
2338 string const command = p.verbatim_item();
2339 string const opt1 = p.getOpt();
2340 string const opt2 = p.getFullOpt();
2341 add_known_command(command, opt1, !opt2.empty());
2342 string const ert = name + '{' + command + '}' +
2344 '{' + p.verbatim_item() + '}';
2346 context.check_layout(os);
2347 begin_inset(os, "FormulaMacro");
2352 else if (t.cs() == "vspace") {
2353 bool starred = false;
2354 if (p.next_token().asInput() == "*") {
2358 string const length = p.verbatim_item();
2361 bool valid = splitLatexLength(length, valstring, unit);
2362 bool known_vspace = false;
2363 bool known_unit = false;
2366 istringstream iss(valstring);
2369 if (unit == "\\smallskipamount") {
2371 known_vspace = true;
2372 } else if (unit == "\\medskipamount") {
2374 known_vspace = true;
2375 } else if (unit == "\\bigskipamount") {
2377 known_vspace = true;
2378 } else if (unit == "\\fill") {
2380 known_vspace = true;
2383 if (!known_vspace) {
2384 switch (unitFromString(unit)) {
2405 if (known_unit || known_vspace) {
2406 // Literal length or known variable
2407 context.check_layout(os);
2408 begin_inset(os, "VSpace ");
2416 // LyX can't handle other length variables in Inset VSpace
2417 string name = t.asInput();
2422 handle_ert(os, name + '{' + unit + '}', context);
2423 else if (value == -1.0)
2424 handle_ert(os, name + "{-" + unit + '}', context);
2426 handle_ert(os, name + '{' + valstring + unit + '}', context);
2428 handle_ert(os, name + '{' + length + '}', context);
2433 //cerr << "#: " << t << " mode: " << mode << endl;
2434 // heuristic: read up to next non-nested space
2436 string s = t.asInput();
2437 string z = p.verbatim_item();
2438 while (p.good() && z != " " && z.size()) {
2439 //cerr << "read: " << z << endl;
2441 z = p.verbatim_item();
2443 cerr << "found ERT: " << s << endl;
2444 handle_ert(os, s + ' ', context);
2446 string name = t.asInput();
2447 if (p.next_token().asInput() == "*") {
2448 // Starred commands like \vspace*{}
2449 p.get_token(); // Eat '*'
2452 if (! parse_command(name, p, os, outer, context))
2453 handle_ert(os, name, context);
2456 if (flags & FLAG_LEAVE) {
2457 flags &= ~FLAG_LEAVE;