#include "Encoding.h"
-#include "debug.h"
#include "LaTeXFeatures.h"
#include "Lexer.h"
#include "LyXRC.h"
+#include "support/debug.h"
#include "support/FileName.h"
#include "support/lstrings.h"
#include "support/unicode.h"
#include <sstream>
-#ifndef CXX_GLOBAL_CSTD
-using std::strtol;
-#endif
-using std::endl;
-using std::string;
-
+using namespace std;
+using namespace lyx::support;
namespace lyx {
-using support::FileName;
-
Encodings encodings;
namespace {
};
-typedef std::map<char_type, CharInfo> CharInfoMap;
+typedef map<char_type, CharInfo> CharInfoMap;
CharInfoMap unicodesymbols;
} // namespace anon
+// Exception thrown when a character cannot be translated to a LaTeX
+// command in the current encoding. failed_char records the offending
+// code point; par_id and pos are zero-initialized here and are
+// presumably filled in later by the caller/catcher to report where in
+// the document the failure occurred -- TODO confirm at the catch site.
+EncodingException::EncodingException(char_type c)
+	: failed_char(c), par_id(0), pos(0)
+{
+}
+
+
+// std::exception interface. Returns a static description only; the
+// concrete code point is carried separately in failed_char.
+const char * EncodingException::what() const throw()
+{
+	return "Could not find LaTeX command for a character";
+}
+
+
Encoding::Encoding(string const & n, string const & l, string const & i,
bool f, Encoding::Package p)
: Name_(n), LatexName_(l), iconvName_(i), fixedwidth_(f), package_(p)
// We do not need to check all UCS4 code points, it is enough
// if we check all 256 code points of this encoding.
for (unsigned short j = 0; j < 256; ++j) {
- char const c = j;
- std::vector<char_type> const ucs4 = eightbit_to_ucs4(&c, 1, iconvName_);
+ char const c = char(j);
+ vector<char_type> const ucs4 = eightbit_to_ucs4(&c, 1, iconvName_);
if (ucs4.size() == 1) {
char_type const c = ucs4[0];
CharInfoMap::const_iterator const it = unicodesymbols.find(c);
// therefore we need to check all UCS4 code points.
// This is expensive!
for (char_type c = 0; c < max_ucs4; ++c) {
- std::vector<char> const eightbit = ucs4_to_eightbit(&c, 1, iconvName_);
+ vector<char> const eightbit = ucs4_to_eightbit(&c, 1, iconvName_);
if (!eightbit.empty()) {
CharInfoMap::const_iterator const it = unicodesymbols.find(c);
if (it == unicodesymbols.end() || !it->second.force)
// c cannot be encoded in this encoding
CharInfoMap::const_iterator const it = unicodesymbols.find(c);
if (it == unicodesymbols.end())
- lyxerr << "Could not find LaTeX command for character 0x"
- << std::hex << c << std::dec
- << ".\nLaTeX export will fail."
- << endl;
+ throw EncodingException(c);
else
return it->second.command;
}
}
+// Return every character this encoding can output: characters that the
+// 8-bit encoding can represent directly (encodable_, plus everything
+// below start_encodable_) and characters that have a LaTeX command in
+// the unicodesymbols table.
+set<char_type> Encoding::getSymbolsList()
+{
+	// assure the used encoding is properly initialized
+	if (!complete_)
+		init();
+	BOOST_ASSERT(complete_);
+
+	// first all encodable characters
+	CharSet symbols = encodable_;
+	// add those below start_encodable_
+	// (code points below start_encodable_ are assumed encodable without
+	// being stored in encodable_ -- presumably the ASCII/low range)
+	for (char_type c = 0; c < start_encodable_; ++c)
+		symbols.insert(c);
+	// now the ones from the unicodesymbols file
+	// (set::insert ignores duplicates already present from encodable_)
+	CharInfoMap::const_iterator const end = unicodesymbols.end();
+	for (CharInfoMap::const_iterator it = unicodesymbols.begin(); it != end; ++it) {
+		symbols.insert(it->first);
+	}
+	return symbols;
+}
+
+
void Encodings::validate(char_type c, LaTeXFeatures & features)
{
CharInfoMap::const_iterator const it = unicodesymbols.find(c);
Encoding const * Encodings::getFromLaTeXName(string const & name) const
{
- // We don't use std::find_if because it makes copies of the pairs in
+ // We don't use find_if because it makes copies of the pairs in
// the map.
// This linear search is OK since we don't have many encodings.
// Users could even optimize it by putting the encodings they use
string flags;
if (symbolslex.next(true)) {
- std::istringstream is(symbolslex.getString());
+ istringstream is(symbolslex.getString());
// reading symbol directly does not work if
- // char_type == std::wchar_t.
+ // char_type == wchar_t.
boost::uint32_t tmp;
- if(!(is >> std::hex >> tmp))
+ if(!(is >> hex >> tmp))
break;
symbol = tmp;
} else
info.force = false;
while (!flags.empty()) {
string flag;
- flags = support::split(flags, flag, ',');
+ flags = split(flags, flag, ',');
if (flag == "combining")
info.combining = true;
else if (flag == "force")
else
lyxerr << "Ignoring unknown flag `" << flag
<< "' for symbol `0x"
- << std::hex << symbol << std::dec
+ << hex << symbol << dec
<< "'." << endl;
}