X-Git-Url: https://git.lyx.org/gitweb/?a=blobdiff_plain;f=src%2FLexer.cpp;h=c290260144aa8b740c37de3a9dfbd0ab92d28608;hb=294e4884ee29585d311177406cd31499e6d81877;hp=08b2f08077048585d44a09c37fcaefcfaaa7d937;hpb=e1453ef6873b8c6d371a62471c5d309154133866;p=lyx.git

diff --git a/src/Lexer.cpp b/src/Lexer.cpp
index 08b2f08077..c290260144 100644
--- a/src/Lexer.cpp
+++ b/src/Lexer.cpp
@@ -4,7 +4,7 @@
  * Licence details can be found in the file COPYING.
  *
  * \author Alejandro Aguilar Sierra
- * \author Lars Gullik Bjønnes
+ * \author Lars Gullik Bjønnes
  * \author Jean-Marc Lasgouttes
  * \author John Levon
  *
@@ -14,12 +14,14 @@
 #include <config.h>
 
 #include "Lexer.h"
+#include "Format.h"
 
 #include "support/convert.h"
 #include "support/debug.h"
 #include "support/FileName.h"
 #include "support/filetools.h"
 #include "support/gzstream.h"
+#include "support/lassert.h"
 #include "support/lstrings.h"
 #include "support/lyxalgo.h"
 #include "support/types.h"
@@ -100,10 +102,10 @@ public:
 	int lineno;
 	///
 	string pushTok;
-	///
-	char commentChar;
 	/// used for error messages
 	string context;
+	///
+	char commentChar;
 private:
 	/// non-copyable
 	Pimpl(Pimpl const &);
@@ -116,7 +118,7 @@ private:
 	public:
 		///
 		PushedTable()
-			: table_elem(0), table_siz(0) {}
+			: table_elem(nullptr), table_siz(0) {}
 		///
 		PushedTable(LexerKeyword * ki, int siz)
 			: table_elem(ki), table_siz(siz) {}
@@ -130,23 +132,19 @@ private:
 	};
 
 
-
 namespace {
 
-class CompareTags
-	: public binary_function<LexerKeyword, LexerKeyword, bool> {
-public:
-	// used by lower_bound, sort and sorted
-	bool operator()(LexerKeyword const & a, LexerKeyword const & b) const
-	{
-		// we use the ascii version, because in turkish, 'i'
-		// is not the lowercase version of 'I', and thus
-		// turkish locale breaks parsing of tags.
-		return compare_ascii_no_case(a.tag, b.tag) < 0;
-	}
-};
+// used by lower_bound, sort and sorted
+bool compareTags(LexerKeyword const & a, LexerKeyword const & b)
+{
+	// we use the ascii version, because in turkish, 'i'
+	// is not the lowercase version of 'I', and thus
+	// turkish locale breaks parsing of tags.
+	return compare_ascii_no_case(a.tag, b.tag) < 0;
+}
+
+} // namespace
 
-} // end of anon namespace
 
 
 Lexer::Pimpl::Pimpl(LexerKeyword * tab, int num)
@@ -194,14 +192,14 @@ void Lexer::Pimpl::verifyTable()
 {
 	// Check if the table is sorted and if not, sort it.
 	if (table
-	    && !lyx::sorted(table, table + no_items, CompareTags())) {
+	    && !lyx::sorted(table, table + no_items, &compareTags)) {
 		lyxerr << "The table passed to Lexer is not sorted!\n"
 		       << "Tell the developers to fix it!" << endl;
 		// We sort it anyway to avoid problems.
 		lyxerr << "\nUnsorted:" << endl;
 		printTable(lyxerr);
 
-		sort(table, table + no_items, CompareTags());
+		sort(table, table + no_items, &compareTags);
 		lyxerr << "\nSorted:" << endl;
 		printTable(lyxerr);
 	}
@@ -237,9 +235,7 @@ void Lexer::Pimpl::popTable()
 bool Lexer::Pimpl::setFile(FileName const & filename)
 {
 	// Check the format of the file.
- string const format = filename.guessFormatFromContents(); - - if (format == "gzip" || format == "zip" || format == "compress") { + if (theFormats().isZippedFile(filename)) { LYXERR(Debug::LYXLEX, "lyxlex: compressed"); // The check only outputs a debug message, because it triggers // a bug in compaq cxx 6.2, where is_open() returns 'true' for @@ -249,9 +245,10 @@ bool Lexer::Pimpl::setFile(FileName const & filename) "file or stream already set."); gz_.open(filename.toFilesystemEncoding().c_str(), ios::in); is.rdbuf(&gz_); - name = filename.absFilename(); + name = filename.absFileName(); lineno = 0; - return gz_.is_open() && is.good(); + if (!gz_.is_open() || !is.good()) + return false; } else { LYXERR(Debug::LYXLEX, "lyxlex: UNcompressed"); @@ -262,12 +259,25 @@ bool Lexer::Pimpl::setFile(FileName const & filename) LYXERR(Debug::LYXLEX, "Error in Lexer::setFile: " "file or stream already set."); } - fb_.open(filename.toFilesystemEncoding().c_str(), ios::in); + fb_.open(filename.toSafeFilesystemEncoding().c_str(), ios::in); is.rdbuf(&fb_); - name = filename.absFilename(); + name = filename.absFileName(); lineno = 0; - return fb_.is_open() && is.good(); + if (!fb_.is_open() || !is.good()) + return false; } + + // Skip byte order mark. + if (is.peek() == 0xef) { + is.get(); + if (is.peek() == 0xbb) { + is.get(); + LASSERT(is.get() == 0xbf, /**/); + } else + is.unget(); + } + + return true; } @@ -306,12 +316,11 @@ bool Lexer::Pimpl::next(bool esc /* = false */) } - unsigned char c = 0; // getc() returns an int char cc = 0; status = 0; while (is && !status) { is.get(cc); - c = cc; + unsigned char c = cc; if (c == commentChar) { // Read rest of line (fast :-) @@ -334,9 +343,8 @@ bool Lexer::Pimpl::next(bool esc /* = false */) if (esc) { - bool escaped = false; do { - escaped = false; + bool escaped = false; is.get(cc); c = cc; if (c == '\r') continue; @@ -428,7 +436,7 @@ int Lexer::Pimpl::searchKeyword(char const * const tag) const LexerKeyword search_tag = { tag, 0 }; LexerKeyword * res = lower_bound(table, table + no_items, - search_tag, CompareTags()); + search_tag, &compareTags); // use the compare_ascii_no_case instead of compare_no_case, // because in turkish, 'i' is not the lowercase version of 'I', // and thus turkish locale breaks parsing of tags. @@ -458,7 +466,7 @@ bool Lexer::Pimpl::eatLine() is.get(cc); c = cc; //LYXERR(Debug::LYXLEX, "Lexer::EatLine read char: `" << c << '\''); - if (c != '\r') + if (c != '\r' && is) buff.push_back(c); } @@ -499,7 +507,7 @@ bool Lexer::Pimpl::nextToken() char cc = 0; is.get(cc); c = cc; - if (c >= ' ' && is) { + if ((c >= ' ' || c == '\t') && is) { buff.clear(); if (c == '\\') { // first char == '\\' @@ -513,7 +521,7 @@ bool Lexer::Pimpl::nextToken() buff.push_back(c); is.get(cc); c = cc; - } while (c >= ' ' && c != '\\' && is); + } while ((c >= ' ' || c == '\t') && c != '\\' && is); } if (c == '\\') @@ -555,7 +563,7 @@ void Lexer::Pimpl::pushToken(string const & pt) ////////////////////////////////////////////////////////////////////// Lexer::Lexer() - : pimpl_(new Pimpl(0, 0)) + : pimpl_(new Pimpl(nullptr, 0)), lastReadOk_(false) {} @@ -636,6 +644,7 @@ void Lexer::setCommentChar(char c) pimpl_->setCommentChar(c); } + int Lexer::lex() { return pimpl_->lex(); @@ -680,23 +689,23 @@ double Lexer::getFloat() const } -string const Lexer::getString() const +string const Lexer::getString(bool trim) const { lastReadOk_ = pimpl_->status == LEX_DATA || pimpl_->status == LEX_TOKEN; if (lastReadOk_) - return pimpl_->getString(); + return trim ? 
support::trim(pimpl_->getString(), "\t ") : pimpl_->getString(); return string(); } -docstring const Lexer::getDocString() const +docstring const Lexer::getDocString(bool trim) const { lastReadOk_ = pimpl_->status == LEX_DATA || pimpl_->status == LEX_TOKEN; if (lastReadOk_) - return pimpl_->getDocString(); + return trim ? support::trim(pimpl_->getDocString(), "\t ") : pimpl_->getDocString(); return docstring(); } @@ -705,28 +714,27 @@ docstring const Lexer::getDocString() const // I would prefer to give a tag number instead of an explicit token // here, but it is not possible because Buffer::readDocument uses // explicit tokens (JMarc) -string const Lexer::getLongString(string const & endtoken) +docstring Lexer::getLongString(docstring const & endtoken) { - string str; - string prefix; + docstring str; + docstring prefix; bool firstline = true; while (pimpl_->is) { //< eatLine only reads from is, not from pushTok if (!eatLine()) // blank line in the file being read continue; + docstring tmpstr = getDocString(); + docstring const token = trim(tmpstr, " \t"); - string const token = trim(getString(), " \t"); - - LYXERR(Debug::PARSER, "LongString: `" << getString() << '\''); + LYXERR(Debug::PARSER, "LongString: `" << tmpstr << '\''); // We do a case independent comparison, like searchKeyword does. - if (compare_ascii_no_case(token, endtoken) == 0) + if (compare_no_case(token, endtoken) == 0) break; - string tmpstr = getString(); if (firstline) { - size_t i = tmpstr.find_first_not_of(' '); + size_t i = tmpstr.find_first_not_of(from_ascii(" \t")); if (i != string::npos) prefix = tmpstr.substr(0, i); firstline = false; @@ -735,14 +743,14 @@ string const Lexer::getLongString(string const & endtoken) // further lines in long strings may have the same // whitespace prefix as the first line. Remove it. - if (prefix.length() && prefixIs(tmpstr, prefix)) - tmpstr.erase(0, prefix.length() - 1); + if (!prefix.empty() && prefixIs(tmpstr, prefix)) + tmpstr.erase(0, prefix.length()); - str += ltrim(tmpstr, "\t") + '\n'; + str += tmpstr + '\n'; } if (!pimpl_->is) - printError("Long string not ended by `" + endtoken + '\''); + printError("Long string not ended by `" + to_utf8(endtoken) + '\''); return str; } @@ -750,7 +758,7 @@ string const Lexer::getLongString(string const & endtoken) bool Lexer::getBool() const { - string const s = pimpl_->getString(); + string const s = pimpl_->getString(); if (s == "false" || s == "0") { lastReadOk_ = true; return false; @@ -796,7 +804,7 @@ Lexer::operator void const *() const // use fail() here. However, our implementation of getString() et al. // can cause the eof() and fail() bits to be set, even though we // haven't tried to read 'em. - return lastReadOk_? this : 0; + return lastReadOk_? this : nullptr; } @@ -900,12 +908,24 @@ string Lexer::quoteString(string const & arg) } +// same for docstring +docstring Lexer::quoteString(docstring const & arg) +{ + docstring res; + res += '"'; + res += subst(subst(arg, from_ascii("\\"), from_ascii("\\\\")), + from_ascii("\""), from_ascii("\\\"")); + res += '"'; + return res; +} + + Lexer & Lexer::operator>>(char const * required) { string token; *this >> token; if (token != required) { - LYXERR0("Missing '" << required << "'-tag in " << pimpl_->context + LYXERR0("Missing '" << required << "'-tag in " << pimpl_->context << ". Got " << token << " instead. Line: " << lineNumber()); pushToken(token); }
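
A note on the comparator change (this note and the sketch below are an editorial illustration, not part of the patch): the hunks above replace the CompareTags functor with a free function compareTags(), and the in-code comments explain why the comparison is ASCII-only: in a Turkish locale, 'i' is not the lowercase of 'I', so a locale-aware comparison would break keyword lookup. The sketch shows that pattern in isolation, with the same locale-independent comparator driving both sort() and lower_bound(). The names KeywordEntry, asciiLower and findKeyword are invented for this sketch; the patch itself works on LexerKeyword entries and compares them with compare_ascii_no_case.

// sketch.cpp: illustrative only, see the note above.
#include <algorithm>
#include <iostream>
#include <vector>

struct KeywordEntry {
	char const * tag; // keyword as it appears in the input
	int code;         // value returned on a match
};

// ASCII-only lowercasing: deliberately ignores the current locale,
// so e.g. a Turkish locale cannot change how 'I' compares with 'i'.
inline char asciiLower(char c)
{
	return (c >= 'A' && c <= 'Z') ? char(c - 'A' + 'a') : c;
}

// Case-insensitive "less than" on tags, usable by sort() and lower_bound().
bool compareTags(KeywordEntry const & a, KeywordEntry const & b)
{
	char const * p = a.tag;
	char const * q = b.tag;
	while (*p && *q && asciiLower(*p) == asciiLower(*q)) {
		++p;
		++q;
	}
	return asciiLower(*p) < asciiLower(*q);
}

// Binary search for a tag; the table must be sorted with the same comparator.
int findKeyword(std::vector<KeywordEntry> const & table, char const * tag)
{
	KeywordEntry const probe = { tag, 0 };
	auto it = std::lower_bound(table.begin(), table.end(), probe, &compareTags);
	// lower_bound guarantees !(*it < probe); equality additionally needs !(probe < *it).
	if (it != table.end() && !compareTags(probe, *it))
		return it->code;
	return -1; // not found
}

int main()
{
	std::vector<KeywordEntry> table = {
		{ "Insert", 2 }, { "begin_document", 1 }, { "end_document", 3 }
	};
	std::sort(table.begin(), table.end(), &compareTags);
	std::cout << findKeyword(table, "BEGIN_DOCUMENT") << '\n'; // prints 1
	std::cout << findKeyword(table, "unknown") << '\n';        // prints -1
}

This is also why verifyTable() in the patch warns and re-sorts: if the table handed to Lexer was not sorted with this exact comparator, lower_bound could legitimately miss an entry.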