*
* \author André Pönitz
* \author Jean-Marc Lasgouttes
+ * \author Uwe Stöhr
*
* Full author contact details are available in file CREDITS.
*/
#include <config.h>
#include "tex2lyx.h"
+
#include "Context.h"
+#include "Encoding.h"
#include "FloatList.h"
-#include "lengthcommon.h"
-#include "support/lstrings.h"
+#include "Layout.h"
+#include "Length.h"
+
+#include "support/lassert.h"
#include "support/convert.h"
+#include "support/FileName.h"
#include "support/filetools.h"
+#include "support/lstrings.h"
-#include <boost/filesystem/operations.hpp>
-#include <boost/tuple/tuple.hpp>
-
+#include <algorithm>
#include <iostream>
#include <map>
#include <sstream>
#include <vector>
+using namespace std;
+using namespace lyx::support;
namespace lyx {
-using support::addExtension;
-using support::changeExtension;
-using support::FileName;
-using support::makeAbsPath;
-using support::makeRelPath;
-using support::rtrim;
-using support::suffixIs;
-using support::contains;
-using support::subst;
-
-using std::cerr;
-using std::endl;
-
-using std::map;
-using std::ostream;
-using std::ostringstream;
-using std::istringstream;
-using std::string;
-using std::vector;
-
-namespace fs = boost::filesystem;
-
void parse_text_in_inset(Parser & p, ostream & os, unsigned flags, bool outer,
Context const & context)
Context & context)
{
Context newcontext(context);
- // Don't inherit the extra stuff
- newcontext.extra_stuff.clear();
+ // Don't inherit the paragraph-level extra stuff
+ newcontext.par_extra_stuff.clear();
parse_text(p, os, flags, outer, newcontext);
// Make sure that we don't create invalid .lyx files
context.need_layout = newcontext.need_layout;
newcontext.need_end_layout = false;
newcontext.new_layout_allowed = false;
// Avoid warning by Context::~Context()
- newcontext.extra_stuff.clear();
+ newcontext.par_extra_stuff.clear();
ostringstream os;
parse_text_snippet(p, os, flags, outer, newcontext);
return os.str();
}
-char const * const known_latex_commands[] = { "ref", "cite", "label", "index",
-"printindex", "pageref", "url", "vref", "vpageref", "prettyref", "eqref", 0 };
+char const * const known_ref_commands[] = { "ref", "pageref", "vref",
+ "vpageref", "prettyref", "eqref", 0 };
/*!
* natbib commands.
- * We can't put these into known_latex_commands because the argument order
- * is reversed in lyx if there are 2 arguments.
* The starred forms are also known.
*/
char const * const known_natbib_commands[] = { "cite", "citet", "citep",
/*!
* jurabib commands.
- * We can't put these into known_latex_commands because the argument order
- * is reversed in lyx if there are 2 arguments.
* No starred form other than "cite*" known.
*/
char const * const known_jurabib_commands[] = { "cite", "citet", "citep",
"citefield", "citetitle", "cite*", 0 };
/// LaTeX names for quotes
-char const * const known_quotes[] = { "glqq", "grqq", "quotedblbase",
-"textquotedblleft", "quotesinglbase", "guilsinglleft", "guilsinglright", 0};
+char const * const known_quotes[] = { "dq", "guillemotleft", "flqq", "og",
+"guillemotright", "frqq", "fg", "glq", "glqq", "textquoteleft", "grq", "grqq",
+"quotedblbase", "textquotedblleft", "quotesinglbase", "textquoteright", "flq",
+"guilsinglleft", "frq", "guilsinglright", 0};
/// the same as known_quotes with .lyx names
-char const * const known_coded_quotes[] = { "gld", "grd", "gld",
-"grd", "gls", "fls", "frs", 0};
+char const * const known_coded_quotes[] = { "prd", "ard", "ard", "ard",
+"ald", "ald", "ald", "gls", "gld", "els", "els", "grd",
+"gld", "grd", "gls", "ers", "fls",
+"fls", "frs", "frs", 0};
/// LaTeX names for font sizes
char const * const known_sizes[] = { "tiny", "scriptsize", "footnotesize",
"small", "normalsize", "large", "Large", "LARGE", "huge", "Huge", 0};
-/// the same as known_sizes with .lyx names
-char const * const known_coded_sizes[] = { "tiny", "scriptsize", "footnotesize",
-"small", "normal", "large", "larger", "largest", "huge", "giant", 0};
+/// the same as known_sizes with .lyx names plus a default entry
+char const * const known_coded_sizes[] = { "default", "tiny", "scriptsize", "footnotesize",
+"small", "normal", "large", "larger", "largest", "huge", "giant", 0};
/// LaTeX 2.09 names for font families
char const * const known_old_font_families[] = { "rm", "sf", "tt", 0};
if (contains(len, '\\'))
unit = trim(string(len, i));
else
- unit = support::ascii_lowercase(trim(string(len, i)));
+ unit = ascii_lowercase(trim(string(len, i)));
return true;
}
-/// A simple function to translate a latex length to something lyx can
+/// A simple function to translate a latex length to something LyX can
/// understand. Not perfect, but rather best-effort.
bool translate_len(string const & length, string & valstring, string & unit)
{
string find_file(string const & name, string const & path,
char const * const * extensions)
{
- // FIXME UNICODE encoding of name and path may be wrong (makeAbsPath
- // expects utf8)
for (char const * const * what = extensions; *what; ++what) {
string const trial = addExtension(name, *what);
- if (fs::exists(makeAbsPath(trial, path).toFilesystemEncoding()))
+ if (makeAbsPath(trial, path).exists())
return trial;
}
return string();
os << "\n\\begin_inset " << name;
}
+/*// use this void when format 288 is supported
+void begin_command_inset(ostream & os, string const & name,
+ string const & latexname)
+{
+ os << "\n\\begin_inset CommandInset " << name;
+ os << "\nLatexCommand " << latexname << "\n";
+}*/
+
void end_inset(ostream & os)
{
}
-class isLayout : public std::unary_function<Layout_ptr, bool> {
-public:
- isLayout(string const name) : name_(name) {}
- bool operator()(Layout_ptr const & ptr) const {
- return ptr->latexname() == name_;
- }
-private:
- string const name_;
-};
-
-
-Layout_ptr findLayout(TextClass const & textclass,
- string const & name)
+Layout const * findLayout(TextClass const & textclass, string const & name)
{
- TextClass::const_iterator beg = textclass.begin();
- TextClass::const_iterator end = textclass.end();
-
- TextClass::const_iterator
- it = std::find_if(beg, end, isLayout(name));
-
- return (it == end) ? Layout_ptr() : *it;
+ DocumentClass::const_iterator lit = textclass.begin();
+ DocumentClass::const_iterator len = textclass.end();
+ for (; lit != len; ++lit)
+ if (lit->latexname() == name)
+ return &*lit;
+ return 0;
}
void output_command_layout(ostream & os, Parser & p, bool outer,
Context & parent_context,
- Layout_ptr newlayout)
+ Layout const * newlayout)
{
parent_context.check_end_layout(os);
Context context(true, parent_context.textclass, newlayout,
}
context.check_deeper(os);
context.check_layout(os);
- if (context.layout->optionalargs > 0) {
+ unsigned int optargs = 0;
+ while (optargs < context.layout->optargs) {
eat_whitespace(p, os, context, false);
- if (p.next_token().character() == '[') {
- p.get_token(); // eat '['
- begin_inset(os, "OptArg\n");
- os << "status collapsed\n\n";
- parse_text_in_inset(p, os, FLAG_BRACK_LAST, outer, context);
- end_inset(os);
- eat_whitespace(p, os, context, false);
- }
+ if (p.next_token().character() != '[')
+ break;
+ p.get_token(); // eat '['
+ begin_inset(os, "OptArg\n");
+ os << "status collapsed\n\n";
+ parse_text_in_inset(p, os, FLAG_BRACK_LAST, outer, context);
+ end_inset(os);
+ eat_whitespace(p, os, context, false);
+ optargs++;
}
+#if 0
+ // This is the code needed to parse required arguments, but
+ // required arguments come into being only much later than the
+ // file format tex2lyx is presently outputting.
+ unsigned int reqargs = 0;
+ while (reqargs < context.layout->reqargs) {
+ eat_whitespace(p, os, context, false);
+ if (p.next_token().character() != '{')
+ break;
+ p.get_token(); // eat '{'
+ begin_inset(os, "OptArg\n");
+ os << "status collapsed\n\n";
+ parse_text_in_inset(p, os, FLAG_BRACE_LAST, outer, context);
+ end_inset(os);
+ eat_whitespace(p, os, context, false);
+ reqargs++;
+ }
+#endif
parse_text(p, os, FLAG_ITEM, outer, context);
context.check_end_layout(os);
if (parent_context.deeper_paragraph) {
* The drawback is that the logic inside the function becomes
* complicated, and that is the reason why it is not implemented.
*/
-void check_space(Parser const & p, ostream & os, Context & context)
+void check_space(Parser & p, ostream & os, Context & context)
{
Token const next = p.next_token();
Token const curr = p.curr_token();
{
string position;
string inner_pos;
- string height_value = "0";
- string height_unit = "pt";
- string height_special = "none";
+ // We need to set the height to the LaTeX default of 1\\totalheight
+ // for the case when no height argument is given
+ string height_value = "1";
+ string height_unit = "in";
+ string height_special = "totalheight";
string latex_height;
if (p.next_token().asInput() == "[") {
position = p.getArg('[', ']');
parse_text_in_inset(p, os, flags, outer, parent_context);
end_inset(os);
#ifdef PRESERVE_LAYOUT
- // lyx puts a % after the end of the minipage
+ // LyX puts a % after the end of the minipage
if (p.next_token().cat() == catNewline && p.next_token().cs().size() > 1) {
// new paragraph
//handle_comment(os, "%dummy", parent_context);
void parse_environment(Parser & p, ostream & os, bool outer,
Context & parent_context)
{
- Layout_ptr newlayout;
+ Layout const * newlayout;
string const name = p.getArg('{', '}');
const bool is_starred = suffixIs(name, '*');
string const unstarred_name = rtrim(name, "*");
parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
end_inset(os);
p.skip_spaces();
+ skip_braces(p); // eat {} that might by set by LyX behind comments
}
else if (name == "lyxgreyedout") {
p.skip_spaces();
}
+ else if (name == "framed") {
+ eat_whitespace(p, os, parent_context, false);
+ parent_context.check_layout(os);
+ begin_inset(os, "Note Framed\n");
+ os << "status open\n";
+ parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
+ end_inset(os);
+ p.skip_spaces();
+ }
+
+ else if (name == "shaded") {
+ eat_whitespace(p, os, parent_context, false);
+ parent_context.check_layout(os);
+ begin_inset(os, "Note Shaded\n");
+ os << "status open\n";
+ parse_text_in_inset(p, os, FLAG_END, outer, parent_context);
+ end_inset(os);
+ p.skip_spaces();
+ }
+
else if (!parent_context.new_layout_allowed)
parse_unknown_environment(p, name, os, FLAG_END, outer,
parent_context);
- // Alignment settings
- else if (name == "center" || name == "flushleft" || name == "flushright" ||
- name == "centering" || name == "raggedright" || name == "raggedleft") {
+ // Alignment and spacing settings
+ // FIXME (bug xxxx): These settings can span multiple paragraphs and
+ // therefore are totally broken!
+ // Note that \centering, raggedright, and raggedleft cannot be handled, as
+ // they are commands not environments. They are furthermore switches that
+ // can be ended by another switches, but also by commands like \footnote or
+ // \parbox. So the only safe way is to leave them untouched.
+ else if (name == "center" || name == "centering" ||
+ name == "flushleft" || name == "flushright" ||
+ name == "singlespace" || name == "onehalfspace" ||
+ name == "doublespace" || name == "spacing") {
eat_whitespace(p, os, parent_context, false);
// We must begin a new paragraph if not already done
if (! parent_context.atParagraphStart()) {
parent_context.check_end_layout(os);
parent_context.new_paragraph(os);
}
- if (name == "flushleft" || name == "raggedright")
+ if (name == "flushleft")
parent_context.add_extra_stuff("\\align left\n");
- else if (name == "flushright" || name == "raggedleft")
+ else if (name == "flushright")
parent_context.add_extra_stuff("\\align right\n");
- else
+ else if (name == "center" || name == "centering")
parent_context.add_extra_stuff("\\align center\n");
+ else if (name == "singlespace")
+ parent_context.add_extra_stuff("\\paragraph_spacing single\n");
+ else if (name == "onehalfspace")
+ parent_context.add_extra_stuff("\\paragraph_spacing onehalf\n");
+ else if (name == "doublespace")
+ parent_context.add_extra_stuff("\\paragraph_spacing double\n");
+ else if (name == "spacing")
+ parent_context.add_extra_stuff("\\paragraph_spacing other " + p.verbatim_item() + "\n");
parse_text(p, os, FLAG_END, outer, parent_context);
- // Just in case the environment is empty ..
+ // Just in case the environment is empty
parent_context.extra_stuff.erase();
// We must begin a new paragraph to reset the alignment
parent_context.new_paragraph(os);
}
// The single '=' is meant here.
- else if ((newlayout = findLayout(parent_context.textclass, name)).get() &&
+ else if ((newlayout = findLayout(parent_context.textclass, name)) &&
newlayout->isEnvironment()) {
eat_whitespace(p, os, parent_context, false);
Context context(true, parent_context.textclass, newlayout,
parent_context.check_end_layout(os);
switch (context.layout->latextype) {
case LATEX_LIST_ENVIRONMENT:
- context.extra_stuff = "\\labelwidthstring "
- + p.verbatim_item() + '\n';
+ context.add_par_extra_stuff("\\labelwidthstring "
+ + p.verbatim_item() + '\n');
p.skip_spaces();
break;
case LATEX_BIB_ENVIRONMENT:
/// parses a comment and outputs it to \p os.
void parse_comment(Parser & p, ostream & os, Token const & t, Context & context)
{
- BOOST_ASSERT(t.cat() == catComment);
+ LASSERT(t.cat() == catComment, return);
if (!t.cs().empty()) {
context.check_layout(os);
handle_comment(os, '%' + t.cs(), context);
/// get the arguments of a natbib or jurabib citation command
-std::pair<string, string> getCiteArguments(Parser & p, bool natbibOrder)
+void get_cite_arguments(Parser & p, bool natbibOrder,
+ string & before, string & after)
{
// We need to distinguish "" and "[]", so we can't use p.getOpt().
// text before the citation
- string before;
+ before.clear();
// text after the citation
- string after = p.getFullOpt();
+ after = p.getFullOpt();
if (!after.empty()) {
before = p.getFullOpt();
if (natbibOrder && !before.empty())
- std::swap(before, after);
+ swap(before, after);
}
- return std::make_pair(before, after);
}
-/// Convert filenames with TeX macros and/or quotes to something LyX can understand
+/// Convert filenames with TeX macros and/or quotes to something LyX
+/// can understand
string const normalize_filename(string const & name)
{
Parser p(trim(name, "\""));
/// convention (relative to .lyx file) if it is relative
void fix_relative_filename(string & name)
{
- if (lyx::support::absolutePath(name))
+ if (FileName::isAbsolute(name))
return;
- // FIXME UNICODE encoding of name may be wrong (makeAbsPath expects
- // utf8)
- name = to_utf8(makeRelPath(from_utf8(makeAbsPath(name, getMasterFilePath()).absFilename()),
- from_utf8(getParentFilePath())));
+
+ name = to_utf8(makeRelPath(from_utf8(makeAbsPath(name, getMasterFilePath()).absFileName()),
+ from_utf8(getParentFilePath())));
}
}
if (!scrap || !context.new_layout_allowed ||
- !context.textclass.hasLayout("Scrap")) {
+ !context.textclass.hasLayout(from_ascii("Scrap"))) {
cerr << "Warning: Could not interpret '" << name
<< "'. Ignoring it." << endl;
return;
// noweb code chunks are implemented with a layout style in LyX they
// always must be in an own paragraph.
context.new_paragraph(os);
- Context newcontext(true, context.textclass, context.textclass["Scrap"]);
+ Context newcontext(true, context.textclass,
+ &context.textclass[from_ascii("Scrap")]);
newcontext.check_layout(os);
os << name;
while (p.good()) {
void parse_text(Parser & p, ostream & os, unsigned flags, bool outer,
Context & context)
{
- Layout_ptr newlayout;
+ Layout const * newlayout = 0;
// Store the latest bibliographystyle (needed for bibtex inset)
string bibliographystyle;
bool const use_natbib = used_packages.find("natbib") != used_packages.end();
if (t.character() == ']' && (flags & FLAG_BRACK_LAST))
return;
+ if (t.character() == '}' && (flags & FLAG_BRACE_LAST))
+ return;
//
// cat codes
// extended to other quotes, but is not so easy (a
// left english quote is the same as a right german
// quote...)
- else if (t.asInput() == "`"
- && p.next_token().asInput() == "`") {
+ else if (t.asInput() == "`" && p.next_token().asInput() == "`") {
context.check_layout(os);
begin_inset(os, "Quotes ");
os << "eld";
p.get_token();
skip_braces(p);
}
- else if (t.asInput() == "'"
- && p.next_token().asInput() == "'") {
+ else if (t.asInput() == "'" && p.next_token().asInput() == "'") {
context.check_layout(os);
begin_inset(os, "Quotes ");
os << "erd";
skip_braces(p);
}
+ else if (t.asInput() == ">" && p.next_token().asInput() == ">") {
+ context.check_layout(os);
+ begin_inset(os, "Quotes ");
+ os << "ald";
+ end_inset(os);
+ p.get_token();
+ skip_braces(p);
+ }
+
+ else if (t.asInput() == "<" && p.next_token().asInput() == "<") {
+ context.check_layout(os);
+ begin_inset(os, "Quotes ");
+ os << "ard";
+ end_inset(os);
+ p.get_token();
+ skip_braces(p);
+ }
+
else if (t.asInput() == "<"
&& p.next_token().asInput() == "<" && noweb_mode) {
p.get_token();
check_space(p, os, context);
else if (t.character() == '[' && noweb_mode &&
- p.next_token().character() == '[') {
+ p.next_token().character() == '[') {
// These can contain underscores
p.putback();
string const s = p.getFullOpt() + ']';
t.cat() == catParameter) {
// This translates "&" to "\\&" which may be wrong...
context.check_layout(os);
- os << t.character();
+ os << t.cs();
}
else if (p.isParagraph()) {
else
os << "\\InsetSpace ~\n";
} else
- os << t.character();
+ os << t.cs();
}
else if (t.cat() == catBegin &&
next.character() == '*') {
p.get_token();
if (p.next_token().cat() == catEnd) {
- os << next.character();
+ os << next.cs();
p.get_token();
} else {
p.putback();
}
if (optarg) {
if (context.layout->labeltype != LABEL_MANUAL) {
- // lyx does not support \item[\mybullet]
+ // LyX does not support \item[\mybullet]
// in itemize environments
handle_ert(os, "[", context);
os << s;
else if (t.cs() == "bibitem") {
context.set_item();
context.check_layout(os);
- os << "\\bibitem ";
- os << p.getOpt();
- os << '{' << p.verbatim_item() << '}' << "\n";
+ begin_inset(os, "LatexCommand ");
+ os << t.cs() << "\n";
+ os << "label \"" << p.getOptContent() << "\"\n";
+ os << "key \"" << p.verbatim_item() << "\"\n";
+ end_inset(os);
}
else if (t.cs() == "def") {
context.check_layout(os);
eat_whitespace(p, os, context, false);
string name = p.get_token().cs();
- while (p.next_token().cat() != catBegin)
- name += p.get_token().asString();
- handle_ert(os, "\\def\\" + name + '{' + p.verbatim_item() + '}', context);
+ eat_whitespace(p, os, context, false);
+
+ // parameter text
+ bool simple = true;
+ string paramtext;
+ int arity = 0;
+ while (p.next_token().cat() != catBegin) {
+ if (p.next_token().cat() == catParameter) {
+ // # found
+ p.get_token();
+ paramtext += "#";
+
+ // followed by number?
+ if (p.next_token().cat() == catOther) {
+ char c = p.getChar();
+ paramtext += c;
+ // number = current arity + 1?
+ if (c == arity + '0' + 1)
+ ++arity;
+ else
+ simple = false;
+ } else
+ paramtext += p.get_token().cs();
+ } else {
+ paramtext += p.get_token().cs();
+ simple = false;
+ }
+ }
+
+ // only output simple (i.e. compatible) macro as FormulaMacros
+ string ert = "\\def\\" + name + ' ' + paramtext + '{' + p.verbatim_item() + '}';
+ if (simple) {
+ context.check_layout(os);
+ begin_inset(os, "FormulaMacro");
+ os << "\n" << ert;
+ end_inset(os);
+ } else
+ handle_ert(os, ert, context);
}
else if (t.cs() == "noindent") {
p.skip_spaces();
- context.add_extra_stuff("\\noindent\n");
+ context.add_par_extra_stuff("\\noindent\n");
}
else if (t.cs() == "appendix") {
- context.add_extra_stuff("\\start_of_appendix\n");
+ context.add_par_extra_stuff("\\start_of_appendix\n");
// We need to start a new paragraph. Otherwise the
// appendix in 'bla\appendix\chapter{' would start
// too late.
eat_whitespace(p, os, context, true);
}
+ // Starred section headings
// Must attempt to parse "Section*" before "Section".
else if ((p.next_token().asInput() == "*") &&
context.new_layout_allowed &&
- // The single '=' is meant here.
- (newlayout = findLayout(context.textclass,
- t.cs() + '*')).get() &&
+ (newlayout = findLayout(context.textclass, t.cs() + '*')) &&
newlayout->isCommand()) {
+ TeXFont const oldFont = context.font;
+ // save the current font size
+ string const size = oldFont.size;
+ // reset the font size to default, because the
+ // font size switches don't affect section
+ // headings and the like
+ context.font.size = known_coded_sizes[0];
+ output_font_change(os, oldFont, context.font);
+ // write the layout
p.get_token();
output_command_layout(os, p, outer, context, newlayout);
+ // set the font size to the original value
+ context.font.size = size;
+ output_font_change(os, oldFont, context.font);
p.skip_spaces();
}
- // The single '=' is meant here.
+ // Section headings and the like
else if (context.new_layout_allowed &&
- (newlayout = findLayout(context.textclass, t.cs())).get() &&
+ (newlayout = findLayout(context.textclass, t.cs())) &&
newlayout->isCommand()) {
+ TeXFont const oldFont = context.font;
+ // save the current font size
+ string const size = oldFont.size;
+ // reset the font size to default, because the font size switches don't
+ // affect section headings and the like
+ context.font.size = known_coded_sizes[0];
+ output_font_change(os, oldFont, context.font);
+ // write the layout
output_command_layout(os, p, outer, context, newlayout);
+ // set the font size to the original value
+ context.font.size = size;
+ output_font_change(os, oldFont, context.font);
+ p.skip_spaces();
+ }
+
+ else if (t.cs() == "caption") {
+ // FIXME: this should get some cleanup. All
+ // the \begin_layout:s are output by the
+ // Context class!
+ p.skip_spaces();
+ context.check_layout(os);
+ p.skip_spaces();
+ begin_inset(os, "Caption\n\n");
+ os << "\\begin_layout "
+ << to_utf8(context.textclass.defaultLayout().name())
+ << '\n';
+ if (p.next_token().character() == '[') {
+ p.get_token(); // eat '['
+ begin_inset(os, "OptArg\n");
+ os << "status collapsed\n";
+ parse_text_in_inset(p, os, FLAG_BRACK_LAST, outer, context);
+ end_inset(os);
+ eat_whitespace(p, os, context, false);
+ }
+ parse_text(p, os, FLAG_ITEM, outer, context);
+ context.check_end_layout(os);
+ // We don't need really a new paragraph, but
+ // we must make sure that the next item gets a \begin_layout.
+ context.new_paragraph(os);
+ end_inset(os);
p.skip_spaces();
+ os << "\\end_layout\n";
}
else if (t.cs() == "includegraphics") {
string const path = getMasterFilePath();
// We want to preserve relative / absolute filenames,
// therefore path is only used for testing
- // FIXME UNICODE encoding of name and path may be
- // wrong (makeAbsPath expects utf8)
- if (!fs::exists(makeAbsPath(name, path).toFilesystemEncoding())) {
+ if (!makeAbsPath(name, path).exists()) {
// The file extension is probably missing.
// Now try to find it out.
string const dvips_name =
name = pdftex_name;
}
- // FIXME UNICODE encoding of name and path may be
- // wrong (makeAbsPath expects utf8)
- if (fs::exists(makeAbsPath(name, path).toFilesystemEncoding()))
+ if (makeAbsPath(name, path).exists())
fix_relative_filename(name);
else
cerr << "Warning: Could not find graphics file '"
numberOfbbOptions++;
if (numberOfbbOptions == 4)
os << "\tBoundingBox "
- << opts["bbllx"] << opts["bblly"]
- << opts["bburx"] << opts["bbury"] << '\n';
+ << opts["bbllx"] << " " << opts["bblly"] << " "
+ << opts["bburx"] << " " << opts["bbury"] << '\n';
else if (numberOfbbOptions > 0)
cerr << "Warning: Ignoring incomplete includegraphics boundingbox arguments.\n";
numberOfbbOptions = 0;
numberOfbbOptions++;
if (numberOfbbOptions == 2)
os << "\tBoundingBox 0bp 0bp "
- << opts["natwidth"] << opts["natheight"] << '\n';
+ << opts["natwidth"] << " " << opts["natheight"] << '\n';
else if (numberOfbbOptions > 0)
cerr << "Warning: Ignoring incomplete includegraphics boundingbox arguments.\n";
ostringstream special;
p.skip_spaces();
context.check_layout(os);
string const s = p.verbatim_item();
+ //FIXME: this never triggers in UTF8
if (s == "\xb1" || s == "\xb3" || s == "\xb2" || s == "\xb5")
os << s;
else
else if (t.cs() == "tableofcontents") {
p.skip_spaces();
context.check_layout(os);
- begin_inset(os, "LatexCommand \\tableofcontents\n");
+ begin_inset(os, "LatexCommand ");
+ os << t.cs() << "\n";
end_inset(os);
skip_braces(p); // swallow this
}
else if (t.cs() == "listof") {
p.skip_spaces(true);
- string const name = p.get_token().asString();
+ string const name = p.get_token().cs();
if (context.textclass.floats().typeExist(name)) {
context.check_layout(os);
begin_inset(os, "FloatList ");
eat_whitespace(p, os, context, false);
}
+ else if (t.cs() == "textcolor") {
+ // scheme is \textcolor{color name}{text}
+ string const color = p.verbatim_item();
+ // we only support the predefined colors of the color package
+ if (color == "black" || color == "blue" || color == "cyan"
+ || color == "green" || color == "magenta" || color == "red"
+ || color == "white" || color == "yellow") {
+ context.check_layout(os);
+ os << "\n\\color " << color << "\n";
+ parse_text_snippet(p, os, FLAG_ITEM, outer, context);
+ context.check_layout(os);
+ os << "\n\\color inherit\n";
+ } else
+ // for custom defined colors
+ handle_ert(os, t.asInput() + "{" + color + "}", context);
+ }
+
else if (t.cs() == "underbar") {
// Do NOT handle \underline.
// \underbar cuts through y, g, q, p etc.,
os << "\n\\" << t.cs() << " default\n";
}
+ else if (t.cs() == "lyxline") {
+ context.check_layout(os);
+ os << "\\lyxline";
+ }
+
+ else if (is_known(t.cs(), known_ref_commands)) {
+ context.check_layout(os);
+ begin_inset(os, "LatexCommand ");
+ os << t.cs() << "\n";
+ // LyX cannot handle newlines in a latex command
+ // FIXME: Move the substitution into parser::getOpt()?
+ os << subst(p.getOpt(), "\n", " ");
+ os << "reference " << '"' << subst(p.verbatim_item(), "\n", " ") << '"' << "\n";
+ end_inset(os);
+ }
+
else if (use_natbib &&
is_known(t.cs(), known_natbib_commands) &&
((t.cs() != "citefullauthor" &&
t.cs() != "citeyearpar") ||
p.next_token().asInput() != "*")) {
context.check_layout(os);
- // tex lyx
- // \citet[before][after]{a} \citet[after][before]{a}
- // \citet[before][]{a} \citet[][before]{a}
- // \citet[after]{a} \citet[after]{a}
- // \citet{a} \citet{a}
- string command = '\\' + t.cs();
+ string command = t.cs();
if (p.next_token().asInput() == "*") {
command += '*';
p.get_token();
}
- if (command == "\\citefullauthor")
+ if (command == "citefullauthor")
// alternative name for "\\citeauthor*"
- command = "\\citeauthor*";
+ command = "citeauthor*";
// text before the citation
string before;
// text after the citation
string after;
+ get_cite_arguments(p, true, before, after);
- boost::tie(before, after) = getCiteArguments(p, true);
- if (command == "\\cite") {
+ if (command == "cite") {
// \cite without optional argument means
// \citet, \cite with at least one optional
// argument means \citep.
if (before.empty() && after.empty())
- command = "\\citet";
+ command = "citet";
else
- command = "\\citep";
+ command = "citep";
}
if (before.empty() && after == "[]")
// avoid \citet[]{a}
before.erase();
after.erase();
}
+ // remove the brackets around after and before
+ if (!after.empty()) {
+ after.erase(0, 1);
+ after.erase(after.length() - 1, 1);
+ // LyX cannot handle newlines in the parameter
+ after = subst(after, "\n", " ");
+ }
+ if (!before.empty()) {
+ before.erase(0, 1);
+ before.erase(before.length() - 1, 1);
+ // LyX cannot handle newlines in the parameter
+ before = subst(before, "\n", " ");
+ }
begin_inset(os, "LatexCommand ");
- os << command << after << before
- << '{' << p.verbatim_item() << "}\n";
+ os << t.cs() << "\n";
+ os << "after " << '"' << after << '"' << "\n";
+ os << "before " << '"' << before << '"' << "\n";
+ os << "key " << '"' << p.verbatim_item() << '"' << "\n";
end_inset(os);
}
else if (use_jurabib &&
is_known(t.cs(), known_jurabib_commands)) {
context.check_layout(os);
- string const command = '\\' + t.cs();
+ string const command = t.cs();
char argumentOrder = '\0';
vector<string> const & options = used_packages["jurabib"];
- if (std::find(options.begin(), options.end(),
+ if (find(options.begin(), options.end(),
"natbiborder") != options.end())
argumentOrder = 'n';
- else if (std::find(options.begin(), options.end(),
+ else if (find(options.begin(), options.end(),
"jurabiborder") != options.end())
argumentOrder = 'j';
string before;
// text after the citation
string after;
+ get_cite_arguments(p, argumentOrder != 'j', before, after);
- boost::tie(before, after) =
- getCiteArguments(p, argumentOrder != 'j');
string const citation = p.verbatim_item();
if (!before.empty() && argumentOrder == '\0') {
cerr << "Warning: Assuming argument order "
"package options if you used an\n"
"earlier jurabib version." << endl;
}
+ if (!after.empty()) {
+ after.erase(0, 1);
+ after.erase(after.length() - 1, 1);
+ }
+ if (!before.empty()) {
+ before.erase(0, 1);
+ before.erase(before.length() - 1, 1);
+ }
begin_inset(os, "LatexCommand ");
- os << command << after << before
- << '{' << citation << "}\n";
+ os << t.cs() << "\n";
+ os << "after " << '"' << after << '"' << "\n";
+ os << "before " << '"' << before << '"' << "\n";
+ os << "key " << '"' << citation << '"' << "\n";
end_inset(os);
}
- else if (is_known(t.cs(), known_latex_commands)) {
- // This needs to be after the check for natbib and
- // jurabib commands, because "cite" has different
- // arguments with natbib and jurabib.
+ else if (t.cs() == "cite") {
context.check_layout(os);
+ // LyX cannot handle newlines in a latex command
+ string after = subst(p.getOptContent(), "\n", " ");
begin_inset(os, "LatexCommand ");
- os << '\\' << t.cs();
- // lyx cannot handle newlines in a latex command
- // FIXME: Move the substitution into parser::getOpt()?
- os << subst(p.getOpt(), "\n", " ");
- os << subst(p.getOpt(), "\n", " ");
- os << '{' << subst(p.verbatim_item(), "\n", " ") << "}\n";
+ os << t.cs() << "\n";
+ os << "after " << '"' << after << '"' << "\n";
+ os << "key " << '"' << subst(p.verbatim_item(), "\n", " ") << '"' << "\n";
+ end_inset(os);
+ }
+
+ else if (t.cs() == "index") {
+ context.check_layout(os);
+ begin_inset(os, "LatexCommand ");
+ os << t.cs() << "\n";
+ // LyX cannot handle newlines in a latex command
+ os << "name " << '"' << subst(p.verbatim_item(), "\n", " ") << '"' << "\n";
+ end_inset(os);
+ }
+
+ else if (t.cs() == "nomenclature") {
+ context.check_layout(os);
+ begin_inset(os, "LatexCommand ");
+ os << t.cs() << "\n";
+ // LyX cannot handle newlines in a latex command
+ string prefix = subst(p.getOptContent(), "\n", " ");
+ if (!prefix.empty())
+ os << "prefix " << '"' << prefix << '"' << "\n";
+ os << "symbol " << '"' << subst(p.verbatim_item(), "\n", " ") << '"' << "\n";
+ os << "description " << '"' << subst(p.verbatim_item(), "\n", " ") << '"' << "\n";
+ end_inset(os);
+ }
+
+ else if (t.cs() == "label") {
+ context.check_layout(os);
+ begin_inset(os, "LatexCommand ");
+ os << t.cs() << "\n";
+ // LyX cannot handle newlines in a latex command
+ os << "name " << '"' << subst(p.verbatim_item(), "\n", " ") << '"' << "\n";
+ end_inset(os);
+ }
+
+ else if (t.cs() == "printindex") {
+ context.check_layout(os);
+ begin_inset(os, "LatexCommand ");
+ os << t.cs() << "\n";
+ end_inset(os);
+ skip_braces(p);
+ }
+
+ else if (t.cs() == "printnomenclature") {
+ context.check_layout(os);
+ begin_inset(os, "LatexCommand ");
+ os << t.cs() << "\n";
+ end_inset(os);
+ skip_braces(p);
+ }
+
+ else if (t.cs() == "url") {
+ context.check_layout(os);
+ begin_inset(os, "LatexCommand ");
+ os << t.cs() << "\n";
+ // LyX cannot handle newlines in a latex command
+ os << "target " << '"' << subst(p.verbatim_item(), "\n", " ") << '"' << "\n";
end_inset(os);
}
char const * const * where = is_known(t.cs(), known_sizes);
context.check_layout(os);
TeXFont const oldFont = context.font;
- context.font.size = known_coded_sizes[where - known_sizes];
+	// the font size index differs by 1, because known_coded_sizes
+	// additionally has a "default" entry at the front
+ context.font.size = known_coded_sizes[where - known_sizes + 1];
output_font_change(os, oldFont, context.font);
eat_whitespace(p, os, context, false);
}
eat_whitespace(p, os, context, false);
}
+ else if (t.cs() == "selectlanguage") {
+ context.check_layout(os);
+	// save the language in case a
+	// \foreignlanguage command is used later
+
+ context.font.language = subst(p.verbatim_item(), "\n", " ");
+ os << "\\lang " << context.font.language << "\n";
+ }
+
+ else if (t.cs() == "foreignlanguage") {
+ context.check_layout(os);
+ os << "\n\\lang " << subst(p.verbatim_item(), "\n", " ") << "\n";
+ os << subst(p.verbatim_item(), "\n", " ");
+	// FIXME: the second argument of \foreignlanguage
+	// has to be parsed (like for \textsf, for
+	// example).
+ // set back to last selectlanguage
+ os << "\n\\lang " << context.font.language << "\n";
+ }
+
+ else if (t.cs() == "inputencoding") {
+ // nothing to write here
+ string const enc = subst(p.verbatim_item(), "\n", " ");
+ p.setEncoding(enc);
+ }
+
else if (t.cs() == "LyX" || t.cs() == "TeX"
|| t.cs() == "LaTeX") {
context.check_layout(os);
skip_braces(p);
}
+ else if (t.cs() == "textquotedbl") {
+ context.check_layout(os);
+ os << "\"";
+ skip_braces(p);
+ }
+
else if (t.cs() == "@" && p.next_token().asInput() == ".") {
context.check_layout(os);
os << "\\SpecialChar \\@.\n";
handle_ert(os, oss.str(), context);
}
- else if (t.cs() == "\"") {
- context.check_layout(os);
- string const name = p.verbatim_item();
- if (name == "a") os << '\xe4';
- else if (name == "o") os << '\xf6';
- else if (name == "u") os << '\xfc';
- else if (name == "A") os << '\xc4';
- else if (name == "O") os << '\xd6';
- else if (name == "U") os << '\xdc';
- else handle_ert(os, "\"{" + name + "}", context);
- }
-
// Problem: \= creates a tabstop inside the tabbing environment
// and else an accent. In the latter case we really would want
// \={o} instead of \= o.
else if (t.cs() == "=" && (flags & FLAG_TABBING))
handle_ert(os, t.asInput(), context);
- else if (t.cs() == "H" || t.cs() == "c" || t.cs() == "^"
- || t.cs() == "'" || t.cs() == "`"
- || t.cs() == "~" || t.cs() == "." || t.cs() == "=") {
- // we need the trim as the LyX parser chokes on such spaces
- // The argument of InsetLatexAccent is parsed as a
- // subset of LaTeX, so don't parse anything here,
- // but use the raw argument.
- // Otherwise we would convert \~{\i} wrongly.
- // This will of course not translate \~{\ss} to \~{ß},
- // but that does at least compile and does only look
- // strange on screen.
- context.check_layout(os);
- os << "\\i \\" << t.cs() << "{"
- << trim(p.verbatim_item(), " ")
- << "}\n";
- }
-
- else if (t.cs() == "ss") {
- context.check_layout(os);
- os << "\xdf";
- skip_braces(p); // eat {}
- }
-
- else if (t.cs() == "i" || t.cs() == "j" || t.cs() == "l" ||
- t.cs() == "L") {
+ // accents (see Table 6 in Comprehensive LaTeX Symbol List)
+ else if (t.cs().size() == 1
+ && contains("\"'.=^`bcdHkrtuv~", t.cs())) {
context.check_layout(os);
- os << "\\i \\" << t.cs() << "{}\n";
- skip_braces(p); // eat {}
+ // try to see whether the string is in unicodesymbols
+ docstring rem;
+ string command = t.asInput() + "{"
+ + trim(p.verbatim_item())
+ + "}";
+ docstring s = encodings.fromLaTeXCommand(from_utf8(command), rem);
+ if (!s.empty()) {
+ if (!rem.empty())
+ cerr << "When parsing " << command
+ << ", result is " << to_utf8(s)
+ << "+" << to_utf8(rem) << endl;
+ os << to_utf8(s);
+ } else
+ // we did not find a non-ert version
+ handle_ert(os, command, context);
}
else if (t.cs() == "\\") {
}
}
+ else if (t.cs() == "newline") {
+ context.check_layout(os);
+ os << "\n\\" << t.cs() << "\n";
+ skip_braces(p); // eat {}
+ }
+
else if (t.cs() == "input" || t.cs() == "include"
|| t.cs() == "verbatiminput") {
string name = '\\' + t.cs();
string const path = getMasterFilePath();
// We want to preserve relative / absolute filenames,
// therefore path is only used for testing
- // FIXME UNICODE encoding of filename and path may be
- // wrong (makeAbsPath expects utf8)
if ((t.cs() == "include" || t.cs() == "input") &&
- !fs::exists(makeAbsPath(filename, path).toFilesystemEncoding())) {
+ !makeAbsPath(filename, path).exists()) {
// The file extension is probably missing.
// Now try to find it out.
string const tex_name =
if (!tex_name.empty())
filename = tex_name;
}
- // FIXME UNICODE encoding of filename and path may be
- // wrong (makeAbsPath expects utf8)
- if (fs::exists(makeAbsPath(filename, path).toFilesystemEncoding())) {
+ if (makeAbsPath(filename, path).exists()) {
string const abstexname =
- makeAbsPath(filename, path).absFilename();
+ makeAbsPath(filename, path).absFileName();
string const abslyxname =
changeExtension(abstexname, ".lyx");
fix_relative_filename(filename);
string const lyxname =
changeExtension(filename, ".lyx");
if (t.cs() != "verbatiminput" &&
- tex2lyx(abstexname, FileName(abslyxname))) {
+ tex2lyx(abstexname, FileName(abslyxname),
+ p.getEncoding())) {
os << name << '{' << lyxname << "}\n";
} else {
os << name << '{' << filename << "}\n";
else if (t.cs() == "bibliography") {
context.check_layout(os);
begin_inset(os, "LatexCommand ");
- os << "\\bibtex";
+ os << "bibtex" << "\n";
+ os << "bibfiles " << '"' << p.verbatim_item() << '"' << "\n";
// Do we have a bibliographystyle set?
- if (!bibliographystyle.empty()) {
- os << '[' << bibliographystyle << ']';
- }
- os << '{' << p.verbatim_item() << "}\n";
+ if (!bibliographystyle.empty())
+ os << "options " << '"' << bibliographystyle << '"' << "\n";
end_inset(os);
}
else if (t.cs() == "parbox")
parse_box(p, os, FLAG_ITEM, outer, context, true);
+
+ //\makebox() is part of the picture environment and different from \makebox{}
+ //\makebox{} will be parsed by parse_box when bug 2956 is fixed
+ else if (t.cs() == "makebox") {
+ string arg = t.asInput();
+ if (p.next_token().character() == '(')
+ //the syntax is: \makebox(x,y)[position]{content}
+ arg += p.getFullParentheseArg();
+ else
+ //the syntax is: \makebox[width][position]{content}
+ arg += p.getFullOpt();
+ handle_ert(os, arg + p.getFullOpt(), context);
+ }
else if (t.cs() == "smallskip" ||
t.cs() == "medskip" ||
else if (is_known(t.cs(), known_spaces)) {
char const * const * where = is_known(t.cs(), known_spaces);
context.check_layout(os);
- begin_inset(os, "InsetSpace ");
+ os << "\\InsetSpace ";
os << '\\' << known_coded_spaces[where - known_spaces]
<< '\n';
// LaTeX swallows whitespace after all spaces except
}
else if (t.cs() == "newpage" ||
- t.cs() == "clearpage" ||
- t.cs() == "cleardoublepage") {
+ t.cs() == "clearpage" ||
+ t.cs() == "cleardoublepage") {
context.check_layout(os);
- // FIXME: what about \\pagebreak?
os << "\n\\" << t.cs() << "\n";
skip_braces(p); // eat {}
}
string const ert = name + '{' + command + '}' +
opt1 + opt2 +
'{' + p.verbatim_item() + '}';
- handle_ert(os, ert, context);
- }
+ context.check_layout(os);
+ begin_inset(os, "FormulaMacro");
+ os << "\n" << ert;
+ end_inset(os);
+ }
+
else if (t.cs() == "vspace") {
bool starred = false;
if (p.next_token().asInput() == "*") {
}
else {
+ // try to see whether the string is in unicodesymbols
+ docstring rem;
+ docstring s = encodings.fromLaTeXCommand(from_utf8(t.asInput()), rem);
+ if (!s.empty()) {
+ if (!rem.empty())
+ cerr << "When parsing " << t.cs()
+ << ", result is " << to_utf8(s)
+ << "+" << to_utf8(rem) << endl;
+ context.check_layout(os);
+ os << to_utf8(s);
+ p.skip_spaces();
+ skip_braces(p); // eat {}
+ }
//cerr << "#: " << t << " mode: " << mode << endl;
// heuristic: read up to next non-nested space
/*
cerr << "found ERT: " << s << endl;
handle_ert(os, s + ' ', context);
*/
- string name = t.asInput();
- if (p.next_token().asInput() == "*") {
- // Starred commands like \vspace*{}
- p.get_token(); // Eat '*'
- name += '*';
+ else {
+ string name = t.asInput();
+ if (p.next_token().asInput() == "*") {
+ // Starred commands like \vspace*{}
+ p.get_token(); // Eat '*'
+ name += '*';
+ }
+ if (!parse_command(name, p, os, outer, context))
+ handle_ert(os, name, context);
}
- if (! parse_command(name, p, os, outer, context))
- handle_ert(os, name, context);
}
if (flags & FLAG_LEAVE) {