// Wrapper
//
+bool iparserdocstream::setEncoding(std::string const & e)
+{
+ is_ << lyx::setEncoding(e);
+ if (s_.empty())
+ return true;
+ cerr << "Setting encoding " << e << " too late. The encoding of `"
+ << to_utf8(s_) << "´ is wrong." << std::endl;
+ return false;
+}
+
+
void iparserdocstream::putback(char_type c)
{
s_ += c;
}
-void Parser::setEncoding(std::string const & e, int const & p)
+bool Parser::setEncoding(std::string const & e, int const & p)
{
// We may (and need to) use unsafe encodings here: Since the text is
// converted to unicode while reading from is_, we never see text in
Encoding const * const enc = encodings.fromLaTeXName(e, p, true);
if (!enc) {
cerr << "Unknown encoding " << e << ". Ignoring." << std::endl;
- return;
+ return false;
}
- setEncoding(enc->iconvName());
+ return setEncoding(enc->iconvName());
}
}
-void Parser::setEncoding(std::string const & e)
+bool Parser::setEncoding(std::string const & e)
{
//cerr << "setting encoding to " << e << std::endl;
- is_.docstream() << lyx::setEncoding(e);
encoding_iconv_ = e;
+ return is_.setEncoding(e);
}
Token const Parser::next_token()
{
static const Token dummy;
- return good() ? tokens_[pos_] : dummy;
+ if (!good())
+ return dummy;
+ if (pos_ >= tokens_.size())
+ tokenize_one();
+ return pos_ < tokens_.size() ? tokens_[pos_] : dummy;
}
Token const Parser::next_next_token()
{
static const Token dummy;
- // If good() has not been called after the last get_token() we need
- // to tokenize two more tokens.
- if (pos_ + 1 >= tokens_.size()) {
- tokenize_one();
+ if (!good())
+ return dummy;
+ // If tokenize_one() has not been called after the last get_token() we
+ // need to tokenize two more tokens.
+ if (pos_ >= tokens_.size()) {
tokenize_one();
+ if (pos_ + 1 >= tokens_.size())
+ tokenize_one();
}
return pos_ + 1 < tokens_.size() ? tokens_[pos_ + 1] : dummy;
}
Token const Parser::get_token()
{
static const Token dummy;
- // if (good())
- // cerr << "looking at token " << tokens_[pos_]
- // << " pos: " << pos_ << '\n';
- return good() ? tokens_[pos_++] : dummy;
+ if (!good())
+ return dummy;
+ if (pos_ >= tokens_.size()) {
+ tokenize_one();
+ if (pos_ >= tokens_.size())
+ return dummy;
+ }
+ // cerr << "looking at token " << tokens_[pos_]
+ // << " pos: " << pos_ << '\n';
+ return tokens_[pos_++];
}
{
if (pos_ < tokens_.size())
return true;
- tokenize_one();
- return pos_ < tokens_.size();
-}
-
-
-char Parser::getChar()
-{
- if (!good())
- error("The input stream is not well...");
- return get_token().character();
+ if (!is_.good())
+ return false;
+ return is_.peek() != idocstream::traits_type::eof();
}
putback();
return make_pair(false, string());
} else {
- for (t = get_token(); good(); t = get_token()) {
+ while (good()) {
+ t = get_token();
// Ignore comments
if (t.cat() == catComment) {
if (!t.cs().empty())