* Licence details can be found in the file COPYING.
*
* \author Alejandro Aguilar Sierra
+ * \author Richard Heck (BibTeX parser improvements)
*
* Full author contact details are available in file CREDITS.
*/
#include "Buffer.h"
#include "BufferParams.h"
#include "DispatchResult.h"
-#include "debug.h"
+#include "EmbeddedFiles.h"
#include "Encoding.h"
#include "FuncRequest.h"
-#include "gettext.h"
#include "LaTeXFeatures.h"
#include "MetricsInfo.h"
#include "OutputParams.h"
+#include "TextClass.h"
#include "frontends/alert.h"
+#include "support/debug.h"
+#include "support/ExceptionMessage.h"
+#include "support/docstream.h"
#include "support/filetools.h"
+#include "support/gettext.h"
#include "support/lstrings.h"
-#include "support/lyxlib.h"
#include "support/os.h"
#include "support/Path.h"
#include "support/textutils.h"
#include <boost/tokenizer.hpp>
+#include <limits>
+using namespace std;
+using namespace lyx::support;
namespace lyx {
-using support::absolutePath;
-using support::ascii_lowercase;
-using support::changeExtension;
-using support::contains;
-using support::copy;
-using support::DocFileName;
-using support::FileName;
-using support::findtexfile;
-using support::isFileReadable;
-using support::isValidLaTeXFilename;
-using support::latex_path;
-using support::ltrim;
-using support::makeAbsPath;
-using support::makeRelPath;
-using support::prefixIs;
-using support::removeExtension;
-using support::rtrim;
-using support::split;
-using support::subst;
-using support::tokenPos;
-using support::trim;
-using support::lowercase;
-
namespace Alert = frontend::Alert;
namespace os = support::os;
-using std::endl;
-using std::getline;
-using std::string;
-using std::ostream;
-using std::pair;
-using std::vector;
-using std::map;
-
// Construct a BibTeX inset from the given command parameters; the inset
// registers itself under the "bibtex" command/dialog name.
InsetBibtex::InsetBibtex(InsetCommandParams const & p)
	: InsetCommand(p, "bibtex")
{}
-std::auto_ptr<Inset> InsetBibtex::doClone() const
+// Describe the parameters this inset accepts.  The table is built once
+// (function-local static) and shared by all instances; the command name
+// is not needed here, hence the unnamed parameter.
+ParamInfo const & InsetBibtex::findInfo(string const & /* cmdName */)
+{
+	static ParamInfo param_info_;
+	if (param_info_.empty()) {
+		// parameters that map onto the LaTeX \bibliography machinery
+		param_info_.add("btprint", ParamInfo::LATEX_OPTIONAL);
+		param_info_.add("bibfiles", ParamInfo::LATEX_REQUIRED);
+		// LyX-internal bookkeeping parameters
+		param_info_.add("embed", ParamInfo::LYX_INTERNAL);
+		param_info_.add("options", ParamInfo::LYX_INTERNAL);
+	}
+	return param_info_;
+}
+
+
+// Return a copy of this inset.  Ownership of the returned raw pointer
+// passes to the caller (the previous std::auto_ptr wrapper is gone).
+Inset * InsetBibtex::clone() const
{
-	return std::auto_ptr<Inset>(new InsetBibtex(*this));
+	return new InsetBibtex(*this);
}
switch (cmd.action) {
case LFUN_INSET_MODIFY: {
- InsetCommandParams p("bibtex");
- InsetCommandMailer::string2params("bibtex", to_utf8(cmd.argument()), p);
- if (!p.getCmdName().empty()) {
- setParams(p);
- cur.buffer().updateBibfilesCache();
- } else
- cur.noUpdate();
+ InsetCommandParams p(BIBTEX_CODE);
+ try {
+ if (!InsetCommandMailer::string2params("bibtex",
+ to_utf8(cmd.argument()), p)) {
+ cur.noUpdate();
+ break;
+ }
+ } catch (ExceptionMessage const & message) {
+ if (message.type_ == WarningException) {
+ Alert::warning(message.title_, message.details_);
+ cur.noUpdate();
+ } else
+ throw message;
+ break;
+ }
+ //
+ InsetCommandParams orig = params();
+ // returned "embed" is composed of "true" or "false", which needs to be adjusted
+ string tmp;
+ string emb;
+
+ string newBibfiles;
+ string newEmbedStatus;
+
+ string bibfiles = to_utf8(p["bibfiles"]);
+ string embedStatus = to_utf8(p["embed"]);
+
+ bibfiles = split(bibfiles, tmp, ',');
+ embedStatus = split(embedStatus, emb, ',');
+ while (!tmp.empty()) {
+ EmbeddedFile file(changeExtension(tmp, "bib"), buffer().filePath());
+ if (!newBibfiles.empty())
+ newBibfiles += ",";
+ newBibfiles += tmp;
+ if (!newEmbedStatus.empty())
+ newEmbedStatus += ",";
+ if (emb == "true")
+ newEmbedStatus += file.inzipName();
+ // Get next file name
+ bibfiles = split(bibfiles, tmp, ',');
+ embedStatus = split(embedStatus, emb, ',');
+ }
+ LYXERR(Debug::FILES, "Update parameters from " << p["bibfiles"]
+ << " " << p["embed"] << " to " << newBibfiles << " "
+ << newEmbedStatus);
+ p["bibfiles"] = from_utf8(newBibfiles);
+ p["embed"] = from_utf8(newEmbedStatus);
+
+ setParams(p);
+ try {
+ // test parameter and copy files
+ embeddedFiles();
+ } catch (ExceptionMessage const & message) {
+ Alert::error(message.title_, message.details_);
+ // do not set parameter if an error happens
+ setParams(orig);
+ break;
+ }
+ buffer().updateBibfilesCache();
break;
}
}
+// Label displayed for this inset inside the LyX work area
+// (translated via gettext).
-docstring const InsetBibtex::getScreenLabel(Buffer const &) const
+docstring InsetBibtex::screenLabel() const
{
	return _("BibTeX Generated Bibliography");
}
-namespace {
-
-string normalize_name(Buffer const & buffer, OutputParams const & runparams,
- string const & name, string const & ext)
+// Return a form of `name` (a .bib/.bst database or style name, without
+// the extension `ext`) suitable for the LaTeX output:
+// - keep `name` unchanged when it is absolute or no readable file exists
+//   next to the buffer;
+// - use the absolute path for non-"nice" (internal/preview) export;
+// - otherwise make it relative to the master buffer's directory.
+static string normalizeName(Buffer const & buffer,
+	OutputParams const & runparams, string const & name, string const & ext)
{
	string const fname = makeAbsPath(name, buffer.filePath()).absFilename();
-	if (absolutePath(name) || !isFileReadable(FileName(fname + ext)))
+	if (FileName(name).isAbsolute() || !FileName(fname + ext).isReadableFile())
		return name;
-	else if (!runparams.nice)
+	if (!runparams.nice)
		return fname;
-	else
-		// FIXME UNICODE
-		return to_utf8(makeRelPath(from_utf8(fname),
-			from_utf8(buffer.getMasterBuffer()->filePath())));
-}
+	// FIXME UNICODE
+	return to_utf8(makeRelPath(from_utf8(fname),
+		from_utf8(buffer.masterBuffer()->filePath())));
}
-int InsetBibtex::latex(Buffer const & buffer, odocstream & os,
- OutputParams const & runparams) const
+int InsetBibtex::latex(odocstream & os, OutputParams const & runparams) const
{
// the sequence of the commands:
// 1. \bibliographystyle{style}
// use such filenames.)
// Otherwise, store the (maybe absolute) path to the original,
// unmangled database name.
- typedef boost::char_separator<char_type> Separator;
- typedef boost::tokenizer<Separator, docstring::const_iterator, docstring> Tokenizer;
-
- Separator const separator(from_ascii(",").c_str());
- // The tokenizer must not be called with temporary strings, since
- // it does not make a copy and uses iterators of the string further
- // down. getParam returns a reference, so this is OK.
- Tokenizer const tokens(getParam("bibfiles"), separator);
- Tokenizer::const_iterator const begin = tokens.begin();
- Tokenizer::const_iterator const end = tokens.end();
-
+ EmbeddedFileList const bibs = embeddedFiles();
+ EmbeddedFileList::const_iterator it = bibs.begin();
+ EmbeddedFileList::const_iterator it_end = bibs.end();
odocstringstream dbs;
- for (Tokenizer::const_iterator it = begin; it != end; ++it) {
- docstring const input = trim(*it);
- // FIXME UNICODE
- string utf8input(to_utf8(input));
+ for (; it != it_end; ++it) {
+ string utf8input = removeExtension(it->availableFile().absFilename());
string database =
- normalize_name(buffer, runparams, utf8input, ".bib");
- FileName const try_in_file(makeAbsPath(database + ".bib", buffer.filePath()));
- bool const not_from_texmf = isFileReadable(try_in_file);
+ normalizeName(buffer(), runparams, utf8input, ".bib");
+ FileName const try_in_file =
+ makeAbsPath(database + ".bib", buffer().filePath());
+ bool const not_from_texmf = try_in_file.isReadableFile();
if (!runparams.inComment && !runparams.dryrun && !runparams.nice &&
not_from_texmf) {
// mangledFilename() needs the extension
DocFileName const in_file = DocFileName(try_in_file);
database = removeExtension(in_file.mangledFilename());
- FileName const out_file(makeAbsPath(database + ".bib",
- buffer.getMasterBuffer()->temppath()));
+ FileName const out_file = makeAbsPath(database + ".bib",
+ buffer().masterBuffer()->temppath());
- bool const success = copy(in_file, out_file);
+ bool const success = in_file.copyTo(out_file);
if (!success) {
lyxerr << "Failed to copy '" << in_file
<< "' to '" << out_file << "'"
from_utf8(database));
}
- if (it != begin)
+ if (it != bibs.begin())
dbs << ',';
// FIXME UNICODE
dbs << from_utf8(latex_path(database));
Alert::warning(_("Export Warning!"),
_("There are spaces in the paths to your BibTeX databases.\n"
"BibTeX will be unable to find them."));
-
}
// Style-Options
string bibtotoc;
if (prefixIs(style, "bibtotoc")) {
bibtotoc = "bibtotoc";
- if (contains(style, ',')) {
+ if (contains(style, ','))
style = split(style, bibtotoc, ',');
- }
}
// line count
int nlines = 0;
if (!style.empty()) {
- string base =
- normalize_name(buffer, runparams, style, ".bst");
- FileName const try_in_file(makeAbsPath(base + ".bst", buffer.filePath()));
- bool const not_from_texmf = isFileReadable(try_in_file);
+ string base = normalizeName(buffer(), runparams, style, ".bst");
+ FileName const try_in_file =
+ makeAbsPath(base + ".bst", buffer().filePath());
+ bool const not_from_texmf = try_in_file.isReadableFile();
// If this style does not come from texmf and we are not
// exporting to .tex copy it to the tmp directory.
// This prevents problems with spaces and 8bit charcaters
// use new style name
DocFileName const in_file = DocFileName(try_in_file);
base = removeExtension(in_file.mangledFilename());
- FileName const out_file(makeAbsPath(base + ".bst",
- buffer.getMasterBuffer()->temppath()));
- bool const success = copy(in_file, out_file);
+ FileName const out_file = makeAbsPath(base + ".bst",
+ buffer().masterBuffer()->temppath());
+ bool const success = in_file.copyTo(out_file);
if (!success) {
lyxerr << "Failed to copy '" << in_file
<< "' to '" << out_file << "'"
}
// FIXME UNICODE
os << "\\bibliographystyle{"
- << from_utf8(latex_path(normalize_name(buffer, runparams, base, ".bst")))
+ << from_utf8(latex_path(normalizeName(buffer(), runparams, base, ".bst")))
<< "}\n";
nlines += 1;
}
"BibTeX will be unable to find it."));
}
- if (!db_out.empty() && buffer.params().use_bibtopic){
+ if (!db_out.empty() && buffer().params().use_bibtopic) {
os << "\\begin{btSect}{" << db_out << "}\n";
docstring btprint = getParam("btprint");
if (btprint.empty())
}
// bibtotoc-Option
- if (!bibtotoc.empty() && !buffer.params().use_bibtopic) {
+ if (!bibtotoc.empty() && !buffer().params().use_bibtopic) {
// maybe a problem when a textclass has no "art" as
// part of its name, because it's than book.
// For the "official" lyx-layouts it's no problem to support
// all well
- if (!contains(buffer.params().getTextClass().name(),
- "art")) {
- if (buffer.params().sides == TextClass::OneSide) {
+ if (!contains(buffer().params().documentClass().name(), "art")) {
+ if (buffer().params().sides == OneSide) {
// oneside
os << "\\clearpage";
} else {
}
}
- if (!db_out.empty() && !buffer.params().use_bibtopic){
+ if (!db_out.empty() && !buffer().params().use_bibtopic) {
+ docstring btprint = getParam("btprint");
+ if (btprint == "btPrintAll") {
+ os << "\\nocite{*}\n";
+ nlines += 1;
+ }
os << "\\bibliography{" << db_out << "}\n";
nlines += 1;
}
}
-vector<FileName> const InsetBibtex::getFiles(Buffer const & buffer) const
+EmbeddedFileList InsetBibtex::embeddedFiles() const
{
- FileName path(buffer.filePath());
- support::Path p(path);
+ FileName path(buffer().filePath());
+ PathChanger p(path);
- vector<FileName> vec;
+ EmbeddedFileList vec;
string tmp;
+ string emb;
// FIXME UNICODE
string bibfiles = to_utf8(getParam("bibfiles"));
+ string embedStatus = to_utf8(getParam("embed"));
bibfiles = split(bibfiles, tmp, ',');
+ embedStatus = split(embedStatus, emb, ',');
while (!tmp.empty()) {
- FileName const file = findtexfile(changeExtension(tmp, "bib"), "bib");
- LYXERR(Debug::LATEX) << "Bibfile: " << file << endl;
-
- // If we didn't find a matching file name just fail silently
- if (!file.empty())
+ if (!emb.empty()) {
+ EmbeddedFile file(changeExtension(tmp, "bib"), buffer().filePath());
+ // If the file structure is correct, this should not fail.
+ file.setEmbed(true);
+ file.enable(buffer().embedded(), &buffer());
vec.push_back(file);
+ } else {
+ // this includes the cases when the embed parameter is empty
+ FileName const file = findtexfile(changeExtension(tmp, "bib"), "bib");
+
+ // If we didn't find a matching file name just fail silently
+ if (!file.empty()) {
+ EmbeddedFile efile = EmbeddedFile(file.absFilename(), buffer().filePath());
+ efile.setEmbed(false);
+ efile.enable(buffer().embedded(), &buffer());
+ vec.push_back(efile);
+ }
+ }
// Get next file name
bibfiles = split(bibfiles, tmp, ',');
+ embedStatus = split(embedStatus, emb, ',');
}
return vec;
return false;
// read value
- bool legalChar;
+ bool legalChar = true;
while (ifs && !isSpace(ch) &&
- delimChars.find(ch) == docstring::npos &&
- (legalChar = illegalChars.find(ch) == docstring::npos)
- ) {
- if (chCase == makeLowerCase) {
+ delimChars.find(ch) == docstring::npos &&
+ (legalChar = (illegalChars.find(ch) == docstring::npos))
+ )
+ {
+ if (chCase == makeLowerCase)
val += lowercase(ch);
- } else {
+ else
val += ch;
- }
ifs.get(ch);
}
return false;
} else if (ch == '"' || ch == '{') {
+ // set end delimiter
+ char_type delim = ch == '"' ? '"': '}';
- // read delimited text - set end delimiter
- char_type delim = ch == '"'? '"': '}';
-
- // inside this delimited text braces must match.
- // Thus we can have a closing delimiter only
- // when nestLevel == 0
+ //Skip whitespace
+ do {
+ ifs.get(ch);
+ } while (ifs && isSpace(ch));
+
+ if (!ifs)
+ return false;
+
+ //We now have the first non-whitespace character
+ //We'll collapse adjacent whitespace.
+ bool lastWasWhiteSpace = false;
+
+ // inside this delimited text braces must match.
+ // Thus we can have a closing delimiter only
+ // when nestLevel == 0
int nestLevel = 0;
-
- ifs.get(ch);
+
while (ifs && (nestLevel > 0 || ch != delim)) {
+ if (isSpace(ch)) {
+ lastWasWhiteSpace = true;
+ ifs.get(ch);
+ continue;
+ }
+ //We output the space only after we stop getting
+ //whitespace so as not to output any whitespace
+ //at the end of the value.
+ if (lastWasWhiteSpace) {
+ lastWasWhiteSpace = false;
+ val += ' ';
+ }
+
val += ch;
// update nesting level
// This method returns a comma separated list of Bibtex entries
-void InsetBibtex::fillWithBibKeys(Buffer const & buffer,
- std::vector<std::pair<string, docstring> > & keys) const
+void InsetBibtex::fillWithBibKeys(BiblioInfo & keylist,
+ InsetIterator const & /*di*/) const
{
- vector<FileName> const files = getFiles(buffer);
- for (vector<FileName>::const_iterator it = files.begin();
+ EmbeddedFileList const files = embeddedFiles();
+ for (vector<EmbeddedFile>::const_iterator it = files.begin();
it != files.end(); ++ it) {
// This bibtex parser is a first step to parse bibtex files
// more precisely.
// - it accepts more characters in keys or value names than
// bibtex does.
//
- // TODOS:
- // - the entries are split into name = value pairs by the
- // parser. These have to be merged again because of the
- // way lyx treats the entries ( pair<...>(...) ). The citation
- // mechanism in lyx should be changed such that it can use
- // the split entries.
- // - messages on parsing errors can be generated.
- //
-
// Officially bibtex does only support ASCII, but in practice
// you can use the encoding of the main document as long as
// some elements like keys and names are pure ASCII. Therefore
// We don't restrict keys to ASCII in LyX, since our own
// InsetBibitem can generate non-ASCII keys, and nonstandard
// 8bit clean bibtex forks exist.
- idocfstream ifs(it->toFilesystemEncoding().c_str(),
- std::ios_base::in,
- buffer.params().encoding().iconvName());
+
+ idocfstream ifs(it->availableFile().toFilesystemEncoding().c_str(),
+ ios_base::in, buffer().params().encoding().iconvName());
char_type ch;
VarMap strings;
if (entryType == from_ascii("comment")) {
- ifs.ignore(std::numeric_limits<int>::max(), '\n');
+ ifs.ignore(numeric_limits<int>::max(), '\n');
continue;
}
} else {
- // Citation entry. Read the key and all name = value pairs
+ // Citation entry. Try to read the key.
docstring key;
- docstring fields;
- docstring name;
- docstring value;
- docstring commaNewline;
if (!readTypeOrKey(key, ifs, from_ascii(","),
from_ascii("}"), keepCase) || !ifs)
continue;
- // now we have a key, so we will add an entry
- // (even if it's empty, as bibtex does)
+ /////////////////////////////////////////////
+ // now we have a key, so we will add an entry
+ // (even if it's empty, as bibtex does)
//
- // all items must be separated by a comma. If
- // it is missing the scanning of this entry is
- // stopped and the next is searched.
+ // we now read the field = value pairs.
+ // all items must be separated by a comma. If
+ // it is missing the scanning of this entry is
+ // stopped and the next is searched.
+ docstring fields;
+ docstring name;
+ docstring value;
+ docstring commaNewline;
+ docstring data;
+ BibTeXInfo keyvalmap(key, entryType);
+
bool readNext = removeWSAndComma(ifs);
-
+
while (ifs && readNext) {
// read field name
if (!readValue(value, ifs, strings))
break;
- // append field to the total entry string.
- //
- // TODO: Here is where the fields can be put in
- // a more intelligent structure that preserves
- // the already known parts.
- fields += commaNewline;
- fields += name + from_ascii(" = {") + value + '}';
-
- if (!commaNewline.length())
- commaNewline = from_ascii(",\n");
-
+ keyvalmap[name] = value;
+ data += "\n\n" + value;
+ keylist.addFieldName(name);
readNext = removeWSAndComma(ifs);
}
// add the new entry
- keys.push_back(pair<string, docstring>(
- to_utf8(key), fields));
+ keylist.addEntryType(entryType);
+ keyvalmap.setAllData(data);
+ keylist[key] = keyvalmap;
}
-
} //< searching '@'
-
} //< for loop over files
}
}
+// Register every database referenced by this inset with the buffer-wide
+// embedded-file list, so the embedding machinery knows which inset each
+// file belongs to.
+void InsetBibtex::registerEmbeddedFiles(EmbeddedFileList & files) const
+{
+	EmbeddedFileList const dbs = embeddedFiles();
+	for (vector<EmbeddedFile>::const_iterator it = dbs.begin();
+		it != dbs.end(); ++it)
+		files.registerFile(*it, this, buffer());
+}
+
+
+// Propagate a changed embedding status for one database into this
+// inset's parameters.  "bibfiles" and "embed" are parallel
+// comma-separated lists rebuilt in the same order: the "embed" entry is
+// the in-zip name when the file is embedded, and empty otherwise.
+void InsetBibtex::updateEmbeddedFile(EmbeddedFile const & file)
+{
+	// look for the item and update status
+	docstring bibfiles;
+	docstring embed;
+
+	bool first = true;
+	EmbeddedFileList dbs = embeddedFiles();
+	for (EmbeddedFileList::iterator it = dbs.begin();
+		it != dbs.end(); ++it) {
+		// update from file (matched by absolute file name)
+		if (it->absFilename() == file.absFilename())
+			it->setEmbed(file.embedded());
+		// write parameter string, comma-separating entries after the first
+		if (!first) {
+			bibfiles += ',';
+			embed += ',';
+		} else {
+			first = false;
+		}
+		bibfiles += from_utf8(it->outputFilename(buffer().filePath()));
+		if (it->embedded())
+			embed += from_utf8(it->inzipName());
+	}
+	setParam("bibfiles", bibfiles);
+	setParam("embed", embed);
+}
+
+
} // namespace lyx