import sys
import re
import time
+import io
+import codecs
try:
import lyx2lyx_version
version__ = lyx2lyx_version.version
except: # we are running from the build directory so assume the latest version
- version__ = '2.2'
+ version__ = '2.3'
default_debug__ = 2
+# Provide support for both python 2 and 3
+PY2 = sys.version_info[0] == 2
+# End of code to support for both python 2 and 3
+
####################################################################
# Private helper functions
("1_6", list(range(277,346)), minor_versions("1.6" , 10)),
("2_0", list(range(346,414)), minor_versions("2.0" , 8)),
("2_1", list(range(414,475)), minor_versions("2.1" , 5)),
- ("2_2", list(range(475,509)), minor_versions("2.2" , 0))
+ ("2_2", list(range(475,509)), minor_versions("2.2" , 0)),
+ ("2_3", (), minor_versions("2.3" , 0))
]
####################################################################
return line[:-1]
def trim_eol_binary(line):
    """Remove the end-of-line byte(s) from a line of bytes.

    Handles LF, CRLF and lone CR endings.  `line` is a bytes object as
    read in binary mode (python 3 code path: indexing bytes yields ints,
    10 == LF, 13 == CR).  Returns the line unchanged if it has no EOL.
    """
    if not line:
        # Nothing to trim on an empty line.
        return line
    if line[-1] != 10 and line[-1] != 13:
        # May happen for the last line of a document
        return line
    # Compare a one-byte *slice* against a bytes literal: in python 3 a
    # slice of bytes is bytes, so the former comparison with the int 13
    # was always False and the CR of a CRLF ending was never stripped.
    if line[-2:-1] == b'\r':
        return line[:-2]
    else:
        return line[:-1]
+
+
def get_encoding(language, inputencoding, format, cjk_encoding):
" Returns enconding of the lyx file"
if format > 248:
class LyX_base:
"""This class carries all the information of the LyX file."""
- def __init__(self, end_format = 0, input = "", output = "", error = "",
- debug = default_debug__, try_hard = 0, cjk_encoding = '',
- final_version = "", systemlyxdir = '', language = "english",
- encoding = "auto"):
+ def __init__(self, end_format = 0, input = u'', output = u'', error = u'',
+ debug = default_debug__, try_hard = 0, cjk_encoding = u'',
+ final_version = u'', systemlyxdir = u'', language = u'english',
+ encoding = u'auto'):
"""Arguments:
end_format: final format that the file should be converted. (integer)
error: the name of the error file, if empty use the standard error.
    debug: debug level, 0 means no debug, as its value increases it becomes more verbose.
"""
- self.choose_io(input, output)
+ self.choose_input(input)
+ self.output = output
if error:
self.err = open(error, "w")
"""Reads a file into the self.header and
self.body parts, from self.input."""
+ # First pass: Read header to determine file encoding
+ # If we are running under python3 then all strings are binary in this
+ # pass. In some cases we need to convert binary to unicode in order to
+ # use our parser tools. Since we do not know the true encoding yet we
+ # use latin1. This works since a) the parts we are interested in are
+ # pure ASCII (subset of latin1) and b) in contrast to pure ascii or
+ # utf8, one can decode any 8byte string using latin1.
+ first_line = True
while True:
line = self.input.readline()
if not line:
- self.error("Invalid LyX file.")
+ # eof found before end of header
+ self.error("Invalid LyX file: Missing body.")
+
+ if first_line:
+ # Remove UTF8 BOM marker if present
+ if line.startswith(codecs.BOM_UTF8):
+ line = line[len(codecs.BOM_UTF8):]
- line = trim_eol(line)
- if check_token(line, '\\begin_preamble'):
- while 1:
+ first_line = False
+
+ if PY2:
+ line = trim_eol(line)
+ decoded = line
+ else:
+ line = trim_eol_binary(line)
+ decoded = line.decode('latin1')
+ if check_token(decoded, '\\begin_preamble'):
+ while True:
line = self.input.readline()
if not line:
- self.error("Invalid LyX file.")
+ # eof found before end of header
+ self.error("Invalid LyX file: Missing body.")
- line = trim_eol(line)
- if check_token(line, '\\end_preamble'):
+ if PY2:
+ line = trim_eol(line)
+ decoded = line
+ else:
+ line = trim_eol_binary(line)
+ decoded = line.decode('latin1')
+ if check_token(decoded, '\\end_preamble'):
break
- if line.split()[:0] in ("\\layout",
+ if decoded.split()[:0] in ("\\layout",
"\\begin_layout", "\\begin_body"):
self.warning("Malformed LyX file:"
self.preamble.append(line)
- if check_token(line, '\\end_preamble'):
+ if check_token(decoded, '\\end_preamble'):
continue
line = line.rstrip()
if not line:
continue
- if line.split()[0] in ("\\layout", "\\begin_layout",
+ if decoded.split()[0] in ("\\layout", "\\begin_layout",
"\\begin_body", "\\begin_deeper"):
self.body.append(line)
break
self.header.append(line)
- i = find_token(self.header, '\\textclass', 0)
+ if PY2:
+ i = find_token(self.header, '\\textclass', 0)
+ else:
+ i = find_token(self.header, b'\\textclass', 0)
if i == -1:
self.warning("Malformed LyX file: Missing '\\textclass'.")
- i = find_token(self.header, '\\lyxformat', 0) + 1
- self.header[i:i] = ['\\textclass article']
-
- self.textclass = get_value(self.header, "\\textclass", 0)
- self.backend = get_backend(self.textclass)
- self.format = self.read_format()
- self.language = get_value(self.header, "\\language", 0,
- default = "english")
- self.inputencoding = get_value(self.header, "\\inputencoding",
- 0, default = "auto")
+ if PY2:
+ i = find_token(self.header, '\\lyxformat', 0) + 1
+ self.header[i:i] = ['\\textclass article']
+ else:
+ i = find_token(self.header, b'\\lyxformat', 0) + 1
+ self.header[i:i] = [b'\\textclass article']
+
+ if PY2:
+ self.textclass = get_value(self.header, "\\textclass", 0,
+ default = "")
+ self.language = get_value(self.header, "\\language", 0,
+ default = "english")
+ self.inputencoding = get_value(self.header, "\\inputencoding", 0,
+ default = "auto")
+ else:
+ self.textclass = get_value(self.header, b"\\textclass", 0,
+ default = b"")
+ self.language = get_value(self.header, b"\\language", 0,
+ default = b"english").decode('ascii')
+ self.inputencoding = get_value(self.header, b"\\inputencoding", 0,
+ default = b"auto").decode('ascii')
+ self.format = self.read_format()
+ self.initial_format = self.format
self.encoding = get_encoding(self.language,
self.inputencoding, self.format,
self.cjk_encoding)
# Second pass over header and preamble, now we know the file encoding
# Do not forget the textclass (Debian bug #700828)
self.textclass = self.textclass.decode(self.encoding)
+ self.backend = get_backend(self.textclass)
for i in range(len(self.header)):
self.header[i] = self.header[i].decode(self.encoding)
for i in range(len(self.preamble)):
self.preamble[i] = self.preamble[i].decode(self.encoding)
+ for i in range(len(self.body)):
+ self.body[i] = self.body[i].decode(self.encoding)
# Read document body
- while 1:
+ while True:
line = self.input.readline().decode(self.encoding)
if not line:
break
def write(self):
" Writes the LyX file to self.output."
+ self.choose_output(self.output)
self.set_version()
self.set_format()
self.set_textclass()
header = self.header
for line in header + [''] + self.body:
- self.output.write(line.encode(self.encoding)+"\n")
+ self.output.write(line+u"\n")
- def choose_io(self, input, output):
- """Choose input and output streams, dealing transparently with
+ def choose_output(self, output):
+ """Choose output streams dealing transparently with
compressed files."""
- if output:
- self.output = open(output, "wb")
+ # This is a bit complicated, because we need to be compatible both with
+ # python 2 and python 3. Therefore we handle the encoding here and not
+ # when writing individual lines and may need up to 3 layered file like
+ # interfaces.
+ if self.compressed:
+ if output:
+ outputfileobj = open(output, 'wb')
+ else:
+ # We cannot not use stdout directly since it needs text, not bytes in python 3
+ outputfileobj = os.fdopen(sys.stdout.fileno(), 'wb')
+ # We cannot not use gzip.open() since it is not supported by python 2
+ zipbuffer = gzip.GzipFile(mode='wb', fileobj=outputfileobj)
+ # We do not want to use different newlines on different OSes inside zipped files
+ self.output = io.TextIOWrapper(zipbuffer, encoding=self.encoding, newline='\n')
else:
- self.output = sys.stdout
+ if output:
+ self.output = io.open(output, 'w', encoding=self.encoding)
+ else:
+ self.output = io.open(sys.stdout.fileno(), 'w', encoding=self.encoding)
+
- if input and input != '-':
+ def choose_input(self, input):
+ """Choose input stream, dealing transparently with
+ compressed files."""
+
+ # Since we do not know the encoding yet we need to read the input as
+ # bytes in binary mode, and convert later to unicode.
+ if input and input != u'-':
self.dir = os.path.dirname(os.path.abspath(input))
try:
gzip.open(input).readline()
self.input = gzip.open(input)
- self.output = gzip.GzipFile(mode="wb", fileobj=self.output)
+ self.compressed = True
except:
- self.input = open(input)
+ self.input = open(input, 'rb')
+ self.compressed = False
else:
- self.dir = ''
- self.input = sys.stdin
+ self.dir = u''
+ self.input = os.fdopen(sys.stdin.fileno(), 'rb')
+ self.compressed = False
def lyxformat(self, format):
def read_format(self):
" Read from the header the fileformat of the present LyX file."
for line in self.header:
- result = fileformat.match(line)
+ if PY2:
+ result = fileformat.match(line)
+ else:
+ result = fileformat.match(line.decode('ascii'))
if result:
return self.lyxformat(result.group(1))
else:
- self.error("Invalid LyX File.")
+ self.error("Invalid LyX File: Missing format.")
return None
conversion are taken. It returns a list of modules needed to
convert the LyX file from self.format to self.end_format"""
- self.start = self.format
format = self.format
correct_version = 0
        # Conversion mode, back or forth
steps = []
- if (initial_step, self.start) < (final_step, self.end_format):
+ if (initial_step, self.initial_format) < (final_step, self.end_format):
mode = "convert"
full_steps = []
for step in format_relation:
# toc_par = []
# i = 0
-# while 1:
+# while True:
# i = find_tokens(self.body, sections, i)
# if i == -1:
# break
class File(LyX_base):
" This class reads existing LyX files."
- def __init__(self, end_format = 0, input = "", output = "", error = "",
- debug = default_debug__, try_hard = 0, cjk_encoding = '',
- final_version = '', systemlyxdir = ''):
+ def __init__(self, end_format = 0, input = u'', output = u'', error = u'',
+ debug = default_debug__, try_hard = 0, cjk_encoding = u'',
+ final_version = u'', systemlyxdir = u''):
LyX_base.__init__(self, end_format, input, output, error,
debug, try_hard, cjk_encoding, final_version,
systemlyxdir)
lyx_2_0.py \
lyx_2_1.py \
lyx_2_2.py \
+ lyx_2_3.py \
profiling.py \
test_parser_tools.py
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
" Program used to convert between different versions of the lyx file format."
-import optparse
+import argparse
import sys
import LyX
+# Provide support for both python 2 and 3
+PY2 = sys.version_info[0] == 2
+if PY2:
+ # argparse returns strings in the commandline encoding, we need to convert.
+ # sys.getdefaultencoding() would not always be correct, see
+ # http://legacy.python.org/dev/peps/pep-0383/
+ def cmd_arg(arg):
+ return arg.decode(sys.getfilesystemencoding())
+else:
+ cmd_arg = str
+# End of code to support for both python 2 and 3
+
def main():
args = {}
- args["usage"] = "usage: %prog [options] [file]"
-
- args["version"] = """lyx2lyx, version %s
-Copyright (C) 2011 The LyX Team, José Matos and Dekel Tsur""" % LyX.version__
+ args["usage"] = "%(prog)s [options] [file]"
args["description"] = """Convert old lyx file <file> to newer format,
files can be compressed with gzip. If no file is specified then
the standard input is assumed; in this case gzipped files are not
handled."""
- parser = optparse.OptionParser(**args)
+ parser = argparse.ArgumentParser(**args)
parser.set_defaults(debug=LyX.default_debug__, cjk_encoding = '')
- parser.add_option("-d", "--debug", type="int",
+ parser.add_argument("-d", "--debug", type=int, dest="debug",
help="level=0..2 (O_ quiet, 10_verbose) default: 2")
- parser.add_option("-q", "--quiet",
+ parser.add_argument("-q", "--quiet",
action="store_const", const=0, dest="debug")
- parser.add_option("-v", "--verbose",
+ parser.add_argument("-v", "--verbose",
action="store_const", const=1, dest="debug")
- parser.add_option("--noisy",
+ parser.add_argument("--noisy",
action="store_const", const=10, dest="debug")
- parser.add_option("-c", "--encoding", dest="cjk_encoding",
+ parser.add_argument("-c", "--encoding", type=cmd_arg, dest="cjk_encoding",
help="files in format 413 and lower are read and"
" written in the format of CJK-LyX."
"If encoding is not given or 'auto' the encoding"
"is determined from the locale.")
- parser.add_option("-e", "--err", dest="error",
+ parser.add_argument("-e", "--err", type=cmd_arg, dest="error",
help= "file name of the error file else goes to stderr")
- parser.add_option("-o", "--output",
+ parser.add_argument("-o", "--output", type=cmd_arg, dest="output",
help= "name of the output file else goes to stdout")
- parser.add_option("-t", "--to", dest= "end_format",
+ parser.add_argument("-t", "--to", type=cmd_arg, dest= "end_format",
help= "destination file format, default (latest)")
- parser.add_option("-V", "--final_version", dest= "final_version",
+ parser.add_argument("-V", "--final_version", type=cmd_arg, dest= "final_version",
help= "destination version, default (latest)")
- parser.add_option("-l", "--list", action="store_true",
+ parser.add_argument("-l", "--list", action="store_true",
help = "list all available formats and supported versions")
- parser.add_option("-n", "--try-hard", action="store_true",
+ parser.add_argument("-n", "--try-hard", action="store_true",
help = "try hard (ignore any convertion errors)")
- parser.add_option("-s", "--systemlyxdir", dest= "systemlyxdir",
+ parser.add_argument("-s", "--systemlyxdir", type=cmd_arg, dest= "systemlyxdir",
help= "LyX system directory for conversion from version 489 or older")
+ parser.add_argument('--version', action='version', version="""lyx2lyx, version %s
+Copyright (C) 2011 The LyX Team, José Matos and Dekel Tsur""" % LyX.version__)
+ parser.add_argument("input", nargs='?', type=cmd_arg, default=None)
- (options, args) = parser.parse_args()
- if args:
- options.input = args[0]
- else:
- options.input = None
+ options = parser.parse_args()
if options.list:
sys.stderr.write(LyX.format_info())
" Remove empty line before \\layout. "
lines = document.body
i = 2 # skip first layout
- while 1:
+ while True:
i = find_token(lines, '\\layout', i)
if i == -1:
break
" Remove space after inset formula."
lines = document.body
i = 0
- while 1:
+ while True:
i = find_token(lines, "\\begin_inset Formula", i)
if i == -1:
break
lines = document.body
lyxtable_re = re.compile(r".*\\LyXTable$")
i = 0
- while 1:
+ while True:
i = find_re(lines, lyxtable_re, i)
if i == -1:
break
" Update inset Label."
lines = document.body
i = 0
- while 1:
+ while True:
i = find_token(lines, '\\begin_inset Label', i)
if i == -1:
return
" Update inset LatexDel."
lines = document.body
i = 0
- while 1:
+ while True:
i = find_token(lines, '\\begin_inset LatexDel', i)
if i == -1:
return
lines = document.body
i = 0
cursor_re = re.compile(r'.*(\\cursor \d*)')
- while 1:
+ while True:
i = find_re(lines, cursor_re, i)
if i == -1:
break
" Remove empty insets."
lines = document.body
i = 0
- while 1:
+ while True:
i = find_token(lines, '\\begin_inset ', i)
if i == -1:
break
" Remove formula latex."
lines = document.body
i = 0
- while 1:
+ while True:
i = find_token(lines, '\\latex formula_latex ', i)
if i == -1:
break
" Update latex accent insets."
body = document.body
i = 1
- while 1:
+ while True:
i = find_token(body, '\\i ', i)
if i == -1:
return
" Replace layout Latex_Title with Title."
body = document.body
i = 0
- while 1:
+ while True:
i = find_token(body, '\\layout', i)
if i == -1:
return
body = document.body
i = 0
- while 1:
+ while True:
i = find_token(body, '\\begin_inset Latex', i)
if i == -1:
return
body = document.body
i = 0
- while 1:
+ while True:
i = find_token(body, '\\layout', i)
if i == -1:
return
lines = document.body
lyxtable_re = re.compile(r".*\\LyXTable$")
i = 0
- while 1:
+ while True:
i = find_re(lines, lyxtable_re, i)
if i == -1:
break
" Replace protected separator. "
lines = document.body
i=0
- while 1:
+ while True:
i = find_token(lines, "\\protected_separator", i)
if i == -1:
break
" Merge formula insets. "
lines = document.body
i=0
- while 1:
+ while True:
i = find_token(lines, "\\begin_inset Formula", i)
if i == -1: break
if lines[i+1] in math_env:
lines = document.body
lyxtable_re = re.compile(r".*\\LyXTable$")
i=0
- while 1:
+ while True:
i = find_re(lines, lyxtable_re, i)
if i == -1:
break
" Update table of contents. "
lines = document.body
i = 0
- while 1:
+ while True:
i = find_token(lines,
'\\begin_inset LatexCommand \\tableofcontents', i)
if i == -1:
for margin in margins:
i = 0
- while 1:
+ while True:
i = find_token(lines, margin, i)
if i == -1:
break
lines = document.body
# play safe, clean empty lines
- while 1:
+ while True:
if lines[i]:
break
del lines[i]
del lines[i:j + 1]
# play safe, clean empty lines
- while 1:
+ while True:
if lines[i]:
break
del lines[i]
" Update reference inset. "
lines = document.body
i = 0
- while 1:
+ while True:
i = find_token(lines, '\\begin_inset LatexCommand', i)
if i == -1:
return
lines = document.body
i = 0
latexdel_re = re.compile(r".*\\begin_inset LatexDel")
- while 1:
+ while True:
i = find_re(lines, latexdel_re, i)
if i == -1:
return
" Update tabular to version 1 (xml like syntax). "
lines = document.body
i=0
- while 1:
+ while True:
i = find_re(lines, lyxtable_re, i)
if i == -1:
break
regexp = re.compile(r'^\\begin_inset\s+Tabular')
lines = document.body
i=0
- while 1:
+ while True:
i = find_re(lines, regexp, i)
if i == -1:
break
" Change \begin_float .. \end_float into \begin_inset Float .. \end_inset"
lines = document.body
i = 0
- while 1:
+ while True:
i = find_token(lines, "\\begin_float", i)
if i == -1:
break
lines = document.body
i = 0
flag = 0
- while 1:
+ while True:
i = find_re(lines, pextra_type2_rexp, i)
if i == -1:
break
j = get_next_paragraph(lines, i, document.format + 1)
count = 0
- while 1:
+ while True:
# collect more paragraphs to the minipage
count = count+1
if j == -1 or not check_token(lines[j], "\\layout"):
""]
lines = document.body
i = 0
- while 1:
+ while True:
i = find_tokens(lines, ["\\latex latex", "\\layout LaTeX"], i)
if i == -1:
break
j = i+1
- while 1:
+ while True:
# \end_inset is for ert inside a tabular cell. The other tokens
# are obvious.
j = find_tokens(lines, ["\\latex default", "\\layout", "\\begin_inset", "\\end_inset", "\\end_float", "\\the_end"],
new = ['\layout %s' % document.default_layout, "", ""]
k = i+1
- while 1:
+ while True:
k2 = find_re(lines, ert_rexp, k, j)
inset = hfill = specialchar = 0
if k2 == -1:
# Delete remaining "\latex xxx" tokens
i = 0
- while 1:
+ while True:
i = find_token(lines, "\\latex ", i)
if i == -1:
break
" ERT insert are hidden feature of lyx 1.1.6. This might be removed in the future."
lines = document.body
i = 0
- while 1:
+ while True:
i = find_token(lines, "\\begin_inset ERT", i)
if i == -1:
break
" Combine ERT paragraphs."
lines = document.body
i = 0
- while 1:
+ while True:
i = find_token(lines, "\\begin_inset ERT", i)
if i == -1:
break
" Remove figinset."
lines = document.body
i = 0
- while 1:
+ while True:
i = find_token(lines, "\\begin_inset Figure", i)
if i == -1:
break
regexp = re.compile(r'^\\begin_inset\s+Tabular')
lines = document.body
i = 0
- while 1:
+ while True:
i = find_re(lines, regexp, i)
if i == -1:
break
regexp = re.compile(r'^\\begin_inset\s+Tabular')
body = document.body
i = 0
- while 1:
+ while True:
i = find_re(body, regexp, i)
if i == -1:
break
" Figure insert are hidden feature of lyx 1.1.6. This might be removed in the future."
lines = document.body
i = 0
- while 1:
+ while True:
i = find_token(lines, "\\begin_inset Float ", i)
if i == -1:
break
" Change listof insets."
lines = document.body
i = 0
- while 1:
+ while True:
i = find_token(lines, "\\begin_inset LatexCommand \\listof", i)
if i == -1:
break
" Change info inset."
lines = document.body
i = 0
- while 1:
+ while True:
i = find_token(lines, "\\begin_inset Info", i)
if i == -1:
break
if i == -1:
return
lines[i+1:i+1] = ["\\use_natbib 0",
- "\use_numerical_citations 0"]
+ "\\use_numerical_citations 0"]
supported_versions = ["1.2.%d" % i for i in range(5)] + ["1.2"]
" Change inset Graphics."
lines = document.body
i = 0
- while 1:
+ while True:
i = find_token(lines, "\\begin_inset Graphics", i)
if i == -1:
break
" Change tabular."
lines = document.body
i = 0
- while 1:
+ while True:
i = find_token(lines, "<column", i)
if i == -1:
break
def remove_color_default(document):
" Remove \color default"
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\color default", i)
if i == -1:
return
" \InsetSpace ~ -> \SpecialChar ~"
regexp = re.compile(r'(.*)(\\InsetSpace\s+)(\S+)')
i = 0
- while 1:
+ while True:
i = find_re(document.body, regexp, i)
if i == -1:
break
"\\begin_inset LatexCommand \\eqref -> ERT"
regexp = re.compile(r'^\\begin_inset\s+LatexCommand\s+\\eqref')
i = 0
- while 1:
+ while True:
i = find_re(document.body, regexp, i)
if i == -1:
break
def remove_insetparent(document):
" Remove \lyxparent"
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset LatexCommand \\lyxparent", i)
if i == -1:
break
external_rexp = re.compile(r'\\begin_inset External ([^,]*),"([^"]*)",')
external_header = "\\begin_inset External"
i = 0
- while 1:
+ while True:
i = find_token(document.body, external_header, i)
if i == -1:
break
" Revert inset External."
external_header = "\\begin_inset External"
i = 0
- while 1:
+ while True:
i = find_token(document.body, external_header, i)
if i == -1:
break
" Revert inset External. (part II)"
draft_token = '\tdraft'
i = 0
- while 1:
+ while True:
i = find_token(document.body, '\\begin_inset External', i)
if i == -1:
break
" Convert \\layout comment"
i = 0
comment = "\\layout Comment"
- while 1:
+ while True:
i = find_token(document.body, comment, i)
if i == -1:
return
'\\layout %s' % document.default_layout]
i = i + 7
- while 1:
+ while True:
old_i = i
i = find_token(document.body, "\\layout", i)
if i == -1:
def revert_comment(document):
" Revert comments"
i = 0
- while 1:
+ while True:
i = find_tokens(document.body, ["\\begin_inset Comment", "\\begin_inset Greyedout"], i)
if i == -1:
i = i + 1
struct_stack = ["\\layout"]
- while 1:
+ while True:
i = find_tokens(document.body, ["\\begin_inset", "\\end_inset", "\\layout",
"\\begin_deeper", "\\end_deeper", "\\the_end"], i)
def rm_end_layout(document):
" Remove \end_layout"
i = 0
- while 1:
+ while True:
i = find_token(document.body, '\\end_layout', i)
if i == -1:
def rm_body_changes(document):
" Remove body changes."
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\change_", i)
if i == -1:
return
def layout2begin_layout(document):
" \layout -> \begin_layout "
i = 0
- while 1:
+ while True:
i = find_token(document.body, '\\layout', i)
if i == -1:
return
def begin_layout2layout(document):
" \begin_layout -> \layout "
i = 0
- while 1:
+ while True:
i = find_token(document.body, '\\begin_layout', i)
if i == -1:
return
" Convert table valignment, center -> middle"
regexp = re.compile(r'^\\begin_inset\s+Tabular')
i = 0
- while 1:
+ while True:
i = find_re(document.body, regexp, i)
if i == -1:
return
" Convert table valignment, middle -> center"
regexp = re.compile(r'^\\begin_inset\s+Tabular')
i = 0
- while 1:
+ while True:
i = find_re(document.body, regexp, i)
if i == -1:
return
attribute_values = ['default', 'default', 'default', 'default',
'default', 'default', 'default', 'none', document.language]
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_layout", i)
if i == -1:
return
def convert_note(document):
" Convert Notes. "
i = 0
- while 1:
+ while True:
i = find_tokens(document.body, ["\\begin_inset Note",
"\\begin_inset Comment",
"\\begin_inset Greyedout"], i)
" Revert Notes. "
note_header = "\\begin_inset Note "
i = 0
- while 1:
+ while True:
i = find_token(document.body, note_header, i)
if i == -1:
break
def convert_box(document):
" Convert Boxes. "
i = 0
- while 1:
+ while True:
i = find_tokens(document.body, ["\\begin_inset Boxed",
"\\begin_inset Doublebox",
"\\begin_inset Frameless",
" Revert Boxes."
box_header = "\\begin_inset Box "
i = 0
- while 1:
+ while True:
i = find_token(document.body, box_header, i)
if i == -1:
break
i = i + 1
-def convert_collapsable(document):
+def convert_collapsible(document):
" Convert collapsed insets. "
i = 0
- while 1:
+ while True:
i = find_tokens_exact(document.body, ["\\begin_inset Box",
"\\begin_inset Branch",
"\\begin_inset CharStyle",
# If, however, we find a line starting '\begin_layout'
# (_always_ present) then break with a warning message
i = i + 1
- while 1:
+ while True:
if (document.body[i] == "collapsed false"):
document.body[i] = "status open"
break
i = i + 1
-def revert_collapsable(document):
+def revert_collapsible(document):
" Revert collapsed insets. "
i = 0
- while 1:
+ while True:
i = find_tokens_exact(document.body, ["\\begin_inset Box",
"\\begin_inset Branch",
"\\begin_inset CharStyle",
# If, however, we find a line starting '\begin_layout'
# (_always_ present) then break with a warning message
i = i + 1
- while 1:
+ while True:
if (document.body[i] == "status open"):
document.body[i] = "collapsed false"
break
def convert_ert(document):
" Convert ERT. "
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset ERT", i)
if i == -1:
break
# If, however, we find a line starting '\begin_layout'
# (_always_ present) then break with a warning message
i = i + 1
- while 1:
+ while True:
if (document.body[i] == "status Open"):
document.body[i] = "status open"
break
def revert_ert(document):
" Revert ERT. "
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset ERT", i)
if i == -1:
break
# If, however, we find a line starting '\begin_layout'
# (_always_ present) then break with a warning message
i = i + 1
- while 1:
+ while True:
if (document.body[i] == "status open"):
document.body[i] = "status Open"
break
inner_pos = ["c","t","b","s"]
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset Minipage", i)
if i == -1:
return
# Convert the insets
i = 0
- while 1:
+ while True:
i = find_tokens(document.body, tokens, i)
if i == -1:
return
pos = ['t', 'c', 'b']
inner_pos = ['c', 't', 'b', 's']
i = 0
- while 1:
+ while True:
i = find_token(document.body, '\\begin_inset Frameless', i)
if i == -1:
return
def remove_branches(document):
" Remove branches. "
i = 0
- while 1:
+ while True:
i = find_token(document.header, "\\branch", i)
if i == -1:
break
del document.header[i:j+1]
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset Branch", i)
if i == -1:
return
# If, however, we find a line starting '\layout'
# (_always_ present) then break with a warning message
collapsed_found = 0
- while 1:
+ while True:
if (document.body[i][:9] == "collapsed"):
del document.body[i]
collapsed_found = 1
def convert_float(document):
" Convert sideway floats. "
i = 0
- while 1:
+ while True:
i = find_token_exact(document.body, '\\begin_inset Float', i)
if i == -1:
return
# If, however, we find a line starting '\begin_layout'
# (_always_ present) then break with a warning message
i = i + 1
- while 1:
+ while True:
if (document.body[i][:4] == "wide"):
document.body.insert(i + 1, 'sideways false')
break
def revert_float(document):
" Revert sideways floats. "
i = 0
- while 1:
+ while True:
i = find_token_exact(document.body, '\\begin_inset Float', i)
if i == -1:
return
""" Add extension to documentnames of insetgraphics if necessary.
"""
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset Graphics", i)
if i == -1:
return
return
i = i + 1
filename = document.body[j].split()[1]
- absname = os.path.normpath(os.path.join(document.dir, filename))
- if document.input == stdin and not os.path.isabs(filename):
+ if document.dir == u'' and not os.path.isabs(filename):
# We don't know the directory and cannot check the document.
# We could use a heuristic and take the current directory,
# and we could try to find out if documentname has an extension,
You may need to correct the document manually or run
lyx2lyx again with the .lyx document as commandline argument.""" % filename)
continue
+ absname = os.path.normpath(os.path.join(document.dir, filename))
# This needs to be the same algorithm as in pre 233 insetgraphics
if access(absname, F_OK):
continue
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_layout Author", i)
if i == -1:
return
def convert_bullets(document):
" Convert bullets. "
i = 0
- while 1:
+ while True:
i = find_token(document.header, "\\bullet", i)
if i == -1:
return
def revert_bullets(document):
" Revert bullets. "
i = 0
- while 1:
+ while True:
i = find_token(document.header, "\\bullet", i)
if i == -1:
return
"\\leftindent"
i = 0
- while 1:
+ while True:
i = find_token(document.body, '\\begin_layout', i)
if i == -1:
return
i = i + 1
- while 1:
+ while True:
if body[i].strip() and body[i].split()[0] not in allowed_parameters:
break
'\\emph', '\\numeric', '\\bar', '\\noun',
'\\color', '\\lang']
i = 0
- while 1:
+ while True:
i = find_token(document.body, '\\begin_inset ERT', i)
if i == -1:
return
# convert non-standard paragraphs to standard
k = i
- while 1:
+ while True:
k = find_token(document.body, "\\begin_layout", k, j)
if k == -1:
break
# insert an empty paragraph before each paragraph but the first
k = i
first_pagraph = 1
- while 1:
+ while True:
k = find_token(document.body, "\\begin_layout", k, j)
if k == -1:
break
# convert \\newline to new paragraph
k = i
- while 1:
+ while True:
k = find_token(document.body, "\\newline", k, j)
if k == -1:
break
def revert_ert_paragraphs(document):
" Remove double paragraph breaks. "
i = 0
- while 1:
+ while True:
i = find_token(document.body, '\\begin_inset ERT', i)
if i == -1:
return
# replace paragraph breaks with \newline
k = i
- while 1:
+ while True:
k = find_token(document.body, "\\end_layout", k, j)
l = find_token(document.body, "\\begin_layout", k, j)
if k == -1 or l == -1:
# replace double \newlines with paragraph breaks
k = i
- while 1:
+ while True:
k = find_token(document.body, "\\newline", k, j)
if k == -1:
break
# Change language in the document body
regexp = re.compile(r'^\\lang\s+frenchb')
i = 0
- while 1:
+ while True:
i = find_re(document.body, regexp, i)
if i == -1:
break
return
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_layout SGML", i)
if i == -1:
convert_table_valignment_middle, convert_breaks]],
[226, [convert_note]],
[227, [convert_box]],
- [228, [convert_collapsable, convert_ert]],
+ [228, [convert_collapsible, convert_ert]],
[229, [convert_minipage]],
[230, [convert_jurabib]],
[231, [convert_float]],
[230, [revert_float]],
[229, [revert_jurabib]],
[228, []],
- [227, [revert_collapsable, revert_ert]],
+ [227, [revert_collapsible, revert_ert]],
[226, [revert_box, revert_external_2]],
[225, [revert_note]],
[224, [rm_end_layout, begin_layout2layout, revert_end_document,
unichr = chr
else:
text_type = unicode
- unichr = unichr
# End of code to support for both python 2 and 3
####################################################################
def revert_framed(document):
"Revert framed notes. "
i = 0
- while 1:
+ while True:
i = find_tokens(document.body, ["\\begin_inset Note Framed", "\\begin_inset Note Shaded"], i)
if i == -1:
re_bspace = re.compile(r'\s+bottomspace="[^"]+"')
re_ispace = re.compile(r'\s+interlinespace="[^"]+"')
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset Tabular", i)
if i == -1:
return
def revert_cs_label(document):
" Remove status flag of charstyle label. "
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset CharStyle", i)
if i == -1:
return
        # Search for a line starting 'show_label'
# If it is not there, break with a warning message
i = i + 1
- while 1:
+ while True:
if (document.body[i][:10] == "show_label"):
del document.body[i]
break
This must be called after convert_commandparams.
"""
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\bibitem", i)
if i == -1:
break
# convert_bibitem()), but could be read in, so we convert it here, too.
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset LatexCommand", i)
if i == -1:
break
def revert_commandparams(document):
regex = re.compile(r'(\S+)\s+(.+)')
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset LatexCommand", i)
if i == -1:
break
regex = re.compile(r'(\S+)\s+(.+)')
i = 0
use_nomencl = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset LatexCommand nomenclature", i)
if i == -1:
break
regex = re.compile(r'(\S+)\s+(.+)')
i = 0
use_nomencl = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset LatexCommand printnomenclature", i)
if i == -1:
break
def revert_clearpage(document):
" clearpage -> ERT "
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\clearpage", i)
if i == -1:
break
def revert_cleardoublepage(document):
" cleardoublepage -> ERT "
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\cleardoublepage", i)
if i == -1:
break
def convert_caption(document):
" Convert caption layouts to caption insets. "
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_layout Caption", i)
if i == -1:
return
" Convert caption insets to caption layouts. "
" This assumes that the text class has a caption style. "
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset Caption", i)
if i == -1:
return
re_contents = re.compile(r'^([^\s{]+)(.*)$')
re_accentedcontents = re.compile(r'^\s*{?([^{}]*)}?\s*$')
i = 0
- while 1:
+ while True:
i = find_re(document.body, re_wholeinset, i)
if i == -1:
return
def revert_beamer_alert(document):
" Revert beamer's \\alert inset back to ERT. "
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset CharStyle Alert", i)
if i == -1:
return
document.body[i] = "\\begin_inset ERT"
i = i + 1
- while 1:
+ while True:
if (document.body[i][:13] == "\\begin_layout"):
# Insert the \alert command
document.body[i + 1] = "\\alert{" + document.body[i + 1] + '}'
def revert_beamer_structure(document):
" Revert beamer's \\structure inset back to ERT. "
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset CharStyle Structure", i)
if i == -1:
return
document.body[i] = "\\begin_inset ERT"
i = i + 1
- while 1:
+ while True:
if (document.body[i][:13] == "\\begin_layout"):
document.body[i + 1] = "\\structure{" + document.body[i + 1] + '}'
break
def convert_graphics_rotation(document):
" add scaleBeforeRotation graphics parameter. "
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset Graphics", i)
if i == -1:
return
def revert_graphics_rotation(document):
" remove scaleBeforeRotation graphics parameter. "
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset Graphics", i)
if i == -1:
return
import sys, os
from parser_tools import find_token, find_end_of, find_tokens, get_value
+from unicode_symbols import read_unicodesymbols
####################################################################
# Private helper functions
return l
-# FIXME: Use the version in unicode_symbols.py which has some bug fixes
-def read_unicodesymbols():
+# FIXME: Remove this function if the version imported from unicode_symbols works.
+# This function was the predecessor of that function, which in the meantime got
+# new fixes.
+def read_unicodesymbols2():
" Read the unicodesymbols list of unicode characters and corresponding commands."
+
+ # Provide support for both python 2 and 3
+ PY2 = sys.version_info[0] == 2
+ if not PY2:
+ unichr = chr
+ # End of code to support for both python 2 and 3
+
pathname = os.path.abspath(os.path.dirname(sys.argv[0]))
fp = open(os.path.join(pathname.strip('lyx2lyx'), 'unicodesymbols'))
spec_chars = []
continue
inert = ert_end >= curline
content += lyxline2latex(document, lines[curline], inert)
-
+
return content
def remove_inzip_options(document):
"Remove inzipName and embed options from the Graphics inset"
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset Graphics", i)
if i == -1:
return
LatexCommand cmd
"""
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset LatexCommand", i)
if i == -1:
return
will not be able to recognize. Not sure what to do about that.
"""
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset CommandInset", i)
if i == -1:
return
def convert_latexcommand_index(document):
- "Convert from LatexCommand form to collapsable form."
+ "Convert from LatexCommand form to collapsible form."
i = 0
r1 = re.compile('name "(.*)"')
while True:
def revert_latexcommand_index(document):
- "Revert from collapsable form to LatexCommand form."
+ "Revert from collapsible form to LatexCommand form."
i = 0
while True:
i = find_token(document.body, "\\begin_inset Index", i)
def revert_inset_info(document):
'Replace info inset with its content'
i = 0
- while 1:
+ while True:
i = find_token(document.body, '\\begin_inset Info', i)
if i == -1:
return
def convert_framed_notes(document):
"Convert framed notes to boxes. "
i = 0
- while 1:
+ while True:
i = find_tokens(document.body, ["\\begin_inset Note Framed", "\\begin_inset Note Shaded"], i)
if i == -1:
return
def revert_framed_notes(document):
"Revert framed boxes to notes. "
i = 0
- while 1:
+ while True:
i = find_tokens(document.body, ["\\begin_inset Box Framed", "\\begin_inset Box Shaded"], i)
if i == -1:
def revert_nocite(document):
"Revert LatexCommand nocite to ERT"
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset CommandInset citation", i)
if i == -1:
return
def revert_rotfloat(document):
" Revert sideways custom floats. "
i = 0
- while 1:
+ while True:
# whitespace intended (exclude \\begin_inset FloatList)
i = find_token(document.body, "\\begin_inset Float ", i)
if i == -1:
def revert_widesideways(document):
" Revert wide sideways floats. "
i = 0
- while 1:
+ while True:
# whitespace intended (exclude \\begin_inset FloatList)
i = find_token(document.body, '\\begin_inset Float ', i)
if i == -1:
def revert_inset_embedding(document, type):
' Remove embed tag from certain type of insets'
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset %s" % type, i)
if i == -1:
return
def convert_subfig(document):
" Convert subfigures to subfloats. "
i = 0
- while 1:
+ while True:
addedLines = 0
i = find_token(document.body, '\\begin_inset Graphics', i)
if i == -1:
def revert_subfig(document):
" Revert subfloats. "
i = 0
- while 1:
+ while True:
# whitespace intended (exclude \\begin_inset FloatList)
i = find_tokens(document.body, ['\\begin_inset Float ', '\\begin_inset Wrap'], i)
if i == -1:
def revert_pdfpages(document):
' Revert pdfpages external inset to ERT '
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset External", i)
if i == -1:
return
def revert_graphics_group(document):
' Revert group information from graphics insets '
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset Graphics", i)
if i == -1:
return
"Paragraph*": "Paragraph",
"Subparagraph*": "Subparagraph"}
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_layout", i)
if i == -1:
return
# -*- coding: utf-8 -*-
# This file is part of lyx2lyx
-# -*- coding: utf-8 -*-
# Copyright (C) 2011 The LyX team
#
# This program is free software; you can redistribute it and/or
find_token_exact, find_end_of_inset, find_end_of_layout, \
find_token_backwards, is_in_inset, get_value, get_quoted_value, \
del_token, check_token, get_option_value
-
+
from lyx2lyx_tools import add_to_preamble, insert_to_preamble, \
put_cmd_in_ert, lyx2latex, latex_length, revert_flex_inset, \
revert_font_attrs, hex2ratio, str2bool
i = end
continue
substi = ["\\begin_inset ERT", "status collapsed", "",
- "\\begin_layout Plain Layout", "", "", "\\backslash",
+ "\\begin_layout Plain Layout", "", "", "\\backslash",
cmd + "{", "\\end_layout", "", "\\end_inset"]
substj = ["\\size default", "", "\\begin_inset ERT", "status collapsed", "",
"\\begin_layout Plain Layout", "", "}", "\\end_layout", "", "\\end_inset"]
def revert_phantom(document):
revert_phantom_types(document, "Phantom", "phantom")
-
+
def revert_hphantom(document):
revert_phantom_types(document, "HPhantom", "hphantom")
roman = sans = typew = "default"
osf = False
sf_scale = tt_scale = 100.0
-
+
i = find_token(document.header, "\\font_roman", 0)
if i == -1:
document.warning("Malformed LyX document: Missing \\font_roman.")
else:
sans = get_value(document.header, "\\font_sans", i)
document.header[i] = "\\font_sans default"
-
+
i = find_token(document.header, "\\font_typewriter", 0)
if i == -1:
document.warning("Malformed LyX document: Missing \\font_typewriter.")
else:
# we do not need this value.
document.header[i] = "\\font_sc false"
-
+
i = find_token(document.header, "\\font_sf_scale", 0)
if i == -1:
document.warning("Malformed LyX document: Missing \\font_sf_scale.")
pretext.append(tw)
if osf:
pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
- pretext.append('\usepackage{xunicode}')
- pretext.append('\usepackage{xltxtra}')
+ pretext.append('\\usepackage{xunicode}')
+ pretext.append('\\usepackage{xltxtra}')
insert_to_preamble(document, pretext)
def revert_outputformat(document):
" Remove default output format param "
-
+
if not del_token(document.header, '\\default_output_format', 0):
document.warning("Malformed LyX document: Missing \\default_output_format.")
preamble = []
if useindices:
preamble.append("\\usepackage{splitidx})")
-
+
# deal with index declarations in the preamble
i = 0
while True:
if k == -1:
document.warning("Malformed LyX document: Missing \\end_index.")
return
- if useindices:
+ if useindices:
line = document.header[i]
l = re.compile(r'\\index (.*)$')
m = l.match(line)
del document.header[i:k + 1]
if preamble:
insert_to_preamble(document, preamble)
-
+
# deal with index insets
# these need to have the argument removed
i = 0
subst = put_cmd_in_ert("\\sindex[" + itype + "]{" + content + "}")
document.body[i:k + 1] = subst
i = i + 1
-
+
# deal with index_print insets
i = 0
while True:
if document.body[i + 1].find('LatexCommand printindex') == -1:
document.warning("Malformed LyX document: Incomplete printindex inset.")
return
- subst = ["LatexCommand printindex",
+ subst = ["LatexCommand printindex",
"type \"idx\""]
document.body[i + 1:i + 2] = subst
i = i + 1
def revert_custom_processors(document):
" Remove bibtex_command and index_command params "
-
+
if not del_token(document.header, '\\bibtex_command', 0):
document.warning("Malformed LyX document: Missing \\bibtex_command.")
-
+
if not del_token(document.header, '\\index_command', 0):
document.warning("Malformed LyX document: Missing \\index_command.")
if j == -1:
i += 1
continue
- # FIXME Is this correct? It wipes out everything after the
+ # FIXME Is this correct? It wipes out everything after the
# one we found.
document.body[fline] = document.body[fline][:j - 1] + '>'
- # since there could be a tabular inside this one, we
+ # since there could be a tabular inside this one, we
# cannot jump to end.
i += 1
i = 0
anum = 1
re_author = re.compile(r'(\\author) (\".*\")\s*(.*)$')
-
+
while True:
i = find_token(document.header, "\\author", i)
if i == -1:
document.header[i] = "\\author %i %s %s" % (anum, name, email)
anum += 1
i += 1
-
+
i = 0
while True:
i = find_token(document.body, "\\change_", i)
def convert_mhchem(document):
"Set mhchem to off for versions older than 1.6.x"
- if document.start < 277:
+ if document.initial_format < 277:
# LyX 1.5.x and older did never load mhchem.
# Therefore we must switch it off: Documents that use mhchem have
# a manual \usepackage anyway, and documents not using mhchem but
if mhchem == "off":
# don't load case
- return
+ return
if mhchem == "auto":
i = 0
i += 1
if mhchem == "on":
- pre = ["\\PassOptionsToPackage{version=3}{mhchem}",
+ pre = ["\\PassOptionsToPackage{version=3}{mhchem}",
"\\usepackage{mhchem}"]
- insert_to_preamble(document, pre)
+ insert_to_preamble(document, pre)
def revert_fontenc(document):
"Verteiler": "cc",
"Gruss": "Closing"}
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_layout", i)
if i == -1:
break
document.body[i] = "\\begin_layout " + obsoletedby[layout]
i += 1
-
+
document.textclass = "g-brief"
document.set_textclass()
if i == -1:
begin_table = end_table
continue
-
+
# store the number of rows and columns
numrows = get_option_value(document.body[begin_table], "rows")
numcols = get_option_value(document.body[begin_table], "columns")
break
begin_cell = begin_row
multirows.append([])
- for column in range(numcols):
+ for column in range(numcols):
begin_cell = find_token(document.body, '<cell ', begin_cell, end_row)
if begin_cell == -1:
document.warning("Can't find column " + str(column + 1) + \
"in row " + str(row + 1))
break
- # NOTE
+ # NOTE
# this will fail if someone puts "</cell>" in a cell, but
# that seems fairly unlikely.
end_cell = find_end_of(document.body, begin_cell, '<cell', '</cell>')
else:
document.warning("Unable to match " + document.header[i])
document.header[i] = "\\html_use_mathml " + newval
-
+
def revert_inset_preview(document):
document.warning("Malformed LyX document: Could not find end of Preview inset.")
i += 1
continue
-
+
# This has several issues.
# We need to do something about the layouts inside InsetPreview.
# If we just leave the first one, then we have something like:
# \begin_layout Standard
# and we get a "no \end_layout" error. So something has to be done.
# Ideally, we would check if it is the same as the layout we are in.
- # If so, we just remove it; if not, we end the active one. But it is
+ # If so, we just remove it; if not, we end the active one. But it is
# not easy to know what layout we are in, due to depth changes, etc,
# and it is not clear to me how much work it is worth doing. In most
# cases, the layout will probably be the same.
- #
+ #
# For the same reason, we have to remove the \end_layout tag at the
# end of the last layout in the inset. Again, that will sometimes be
# wrong, but it will usually be right. To know what to do, we would
# again have to know what layout the inset is in.
-
+
blay = find_token(document.body, "\\begin_layout", i, iend)
if blay == -1:
document.warning("Can't find layout for preview inset!")
# This is where we would check what layout we are in.
# The check for Standard is definitely wrong.
- #
+ #
# lay = document.body[blay].split(None, 1)[1]
# if lay != oldlayout:
# # record a boolean to tell us what to do later....
# # better to do it later, since (a) it won't mess up
# # the numbering and (b) we only modify at the end.
-
+
# we want to delete the last \\end_layout in this inset, too.
# note that this may not be the \\end_layout that goes with blay!!
bend = find_end_of_layout(document.body, blay)
del document.body[bend]
del document.body[i:blay + 1]
# we do not need to reset i
-
+
def revert_equalspacing_xymatrix(document):
" Revert a Formula with xymatrix@! to an ERT inset "
document.warning("Malformed LyX document: Could not find end of Formula inset.")
i += 1
continue
-
+
for curline in range(i,j):
found = document.body[curline].find("\\xymatrix@!")
if found != -1:
break
-
+
if found != -1:
has_equal_spacing = True
content = [document.body[i][21:]]
has_preamble = True;
break;
i = j + 1
-
+
if has_equal_spacing and not has_preamble:
add_to_preamble(document, ['\\usepackage[all]{xy}'])
# are there any grey notes?
if find_token(document.body, "\\begin_inset Note Greyedout", 0) == -1:
- # no need to do anything else, and \renewcommand will throw
+ # no need to do anything else, and \renewcommand will throw
# an error since lyxgreyedout will not exist.
return
def revert_turkmen(document):
- "Set language Turkmen to English"
+ "Set language Turkmen to English"
- if document.language == "turkmen":
- document.language = "english"
- i = find_token(document.header, "\\language", 0)
- if i != -1:
- document.header[i] = "\\language english"
+ if document.language == "turkmen":
+ document.language = "english"
+ i = find_token(document.header, "\\language", 0)
+ if i != -1:
+ document.header[i] = "\\language english"
- j = 0
- while True:
- j = find_token(document.body, "\\lang turkmen", j)
- if j == -1:
- return
- document.body[j] = document.body[j].replace("\\lang turkmen", "\\lang english")
- j += 1
+ j = 0
+ while True:
+ j = find_token(document.body, "\\lang turkmen", j)
+ if j == -1:
+ return
+ document.body[j] = document.body[j].replace("\\lang turkmen", "\\lang english")
+ j += 1
def revert_fontcolor(document):
pass
i = 0
- while 1:
+ while True:
i = find_token(document.body, '\\begin_inset Info', i)
if i == -1:
return
m = l.match(line)
if m:
document.header[i] = "\\html_latex_start " + m.group(1)
-
+
i = find_token(document.header, '\\html_latex_end', 0)
if i != -1:
line = document.header[i]
m = l.match(line)
if m:
document.header[i] = "\\html_latex_end " + m.group(1)
-
+
def revert_html_quotes(document):
" Remove quotes around html_latex_start and html_latex_end "
-
+
i = find_token(document.header, '\\html_latex_start', 0)
if i != -1:
line = document.header[i]
del document.header[i]
else:
document.header[i] = "\\html_latex_start \"" + m.group(1) + "\""
-
+
i = find_token(document.header, '\\html_latex_end', 0)
if i != -1:
line = document.header[i]
def convert_optarg(document):
" Convert \\begin_inset OptArg to \\begin_inset Argument "
i = 0
- while 1:
+ while True:
i = find_token(document.body, '\\begin_inset OptArg', i)
if i == -1:
return
def revert_argument(document):
" Convert \\begin_inset Argument to \\begin_inset OptArg "
i = 0
- while 1:
+ while True:
i = find_token(document.body, '\\begin_inset Argument', i)
if i == -1:
return
document.body[k] = "LatexCommand formatted"
i = j + 1
document.header.insert(-1, "\\use_refstyle 0")
-
-
+
+
def revert_refstyle(document):
" Reverts neutral formatted refs to prettyref "
re_ref = re.compile("^reference\s+\"(\w+):(\S+)\"")
i = find_token(document.header, "\\use_refstyle", 0)
if i != -1:
document.header.pop(i)
-
+
def revert_nameref(document):
" Convert namerefs to regular references "
for cmd in cmds:
i = 0
oldcmd = "LatexCommand " + cmd
- while 1:
+ while True:
# It seems better to look for this, as most of the reference
# insets won't be ones we care about.
i = find_token(document.body, oldcmd, i)
document.body[stins:endins + 1] = newcontent
if foundone:
- add_to_preamble(document, ["\usepackage{nameref}"])
+ add_to_preamble(document, ["\\usepackage{nameref}"])
def remove_Nameref(document):
" Convert Nameref commands to nameref commands "
i = 0
- while 1:
+ while True:
# It seems better to look for this, as most of the reference
# insets won't be ones we care about.
i = find_token(document.body, "LatexCommand Nameref" , i)
break
cmdloc = i
i += 1
-
+
# Make sure it is actually in an inset!
val = is_in_inset(document.body, cmdloc, \
"\\begin_inset CommandInset ref")
def convert_flexnames(document):
"Convert \\begin_inset Flex Custom:Style to \\begin_inset Flex Style and similarly for CharStyle and Element."
-
+
i = 0
rx = re.compile(r'^\\begin_inset Flex (?:Custom|CharStyle|Element):(.+)$')
while True:
flexlist = flex_insets
else:
flexlist = flex_elements
-
+
rx = re.compile(r'^\\begin_inset Flex\s+(.+)$')
i = 0
while True:
# force load case
add_to_preamble(document, ["\\usepackage{mathdots}"])
return
-
+
# so we are in the auto case. we want to load mathdots if \iddots is used.
i = 0
while True:
def convert_rule(document):
" Convert \\lyxline to CommandInset line. "
i = 0
-
+
inset = ['\\begin_inset CommandInset line',
'LatexCommand rule',
'offset "0.5ex"',
def revert_rule(document):
" Revert line insets to Tex code "
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset CommandInset line" , i)
if i == -1:
return
j = find_end_of_inset(document.body, i)
if j == -1:
document.warning("Malformed LyX document: Can't find end of Formula inset.")
- return
+ return
lines = "\n".join(document.body[i:j])
if lines.find("\\Diagram") == -1:
i = j
# only need to do it once!
return
-chapters = ("amsbook", "book", "docbook-book", "elsart", "extbook", "extreport",
- "jbook", "jreport", "jsbook", "literate-book", "literate-report", "memoir",
- "mwbk", "mwrep", "recipebook", "report", "scrbook", "scrreprt", "svmono",
+chapters = ("amsbook", "book", "docbook-book", "elsart", "extbook", "extreport",
+ "jbook", "jreport", "jsbook", "literate-book", "literate-report", "memoir",
+ "mwbk", "mwrep", "recipebook", "report", "scrbook", "scrreprt", "svmono",
"svmult", "tbook", "treport", "tufte-book")
def convert_bibtex_clearpage(document):
document.warning("Can't find options for bibliography inset at line " + str(j))
j = k
continue
-
+
if val.find("bibtotoc") == -1:
j = k
continue
-
+
# so we want to insert a new page right before the paragraph that
- # this bibliography thing is in.
+ # this bibliography thing is in.
lay = find_token_backwards(document.body, "\\begin_layout", j)
if lay == -1:
document.warning("Can't find layout containing bibliography inset at line " + str(j))
" http://www.mail-archive.com/lyx-devel@lists.lyx.org/msg161298.html "
if not check_passthru:
return
-
+
rx = re.compile("\\\\begin_layout \s*(\w+)")
beg = 0
for lay in ["Chunk", "Scrap"]:
beg = end + 1
if didit:
beg += 4 # for the extra layout
-
+
def revert_passthru(document):
" http://www.mail-archive.com/lyx-devel@lists.lyx.org/msg161298.html "
document.warning("Can't find end of layout at line " + str(beg))
beg += 1
continue
-
+
# we now want to find out if the next layout is the
# same as this one. but we will need to do this over and
# over again.
# but first let's check and make sure there is no content between the
# two layouts. i'm not sure if that can happen or not.
for l in range(end + 1, next):
- document.warning("c'" + document.body[l] + "'")
if document.body[l] != "":
document.warning("Found content between adjacent " + lay + " layouts!")
break
break
empty = True
for l in range(next + 1, nextend):
- document.warning("e'" + document.body[l] + "'")
if document.body[l] != "":
empty = False
break
if i == -1:
begin_table = end_table
continue
-
+
# store the number of rows and columns
numrows = get_option_value(document.body[begin_table], "rows")
numcols = get_option_value(document.body[begin_table], "columns")
break
begin_cell = begin_row
multirows.append([])
- for column in range(numcols):
+ for column in range(numcols):
begin_cell = find_token(document.body, '<cell ', begin_cell, end_row)
if begin_cell == -1:
document.warning("Can't find column " + str(column + 1) + \
"in row " + str(row + 1))
break
- # NOTE
+ # NOTE
# this will fail if someone puts "</cell>" in a cell, but
# that seems fairly unlikely.
end_cell = find_end_of(document.body, begin_cell, '<cell', '</cell>')
replace('valignment="middle"', 'valignment="top"')
# remove mroffset option
document.body[bcell] = rgx.sub('', document.body[bcell])
-
+
blay = find_token(document.body, "\\begin_layout", bcell, ecell)
if blay == -1:
document.warning("Can't find layout for cell!")
" Convert subscript/superscript inset to TeX code "
i = 0
foundsubscript = False
- while 1:
+ while True:
i = find_token(document.body, '\\begin_inset script', i)
if i == -1:
break
# -*- coding: utf-8 -*-
# This file is part of lyx2lyx
-# -*- coding: utf-8 -*-
# Copyright (C) 2011 The LyX team
#
# This program is free software; you can redistribute it and/or
isInset must be true, if braces inside an InsetLayout needs to be converted
isEnvironment must be true, if the layout is for a LaTeX environment
isOpt must be true, if the argument is an optional one
-
+
Todo: this routine can currently handle only one mandatory argument of environments
'''
n += 1
endn = end
loop += 1
- else:
+ else:
# no brace pair found
# now check the case that we have "}" + "{" in two ERTs
if opt:
def revert_australian(document):
- "Set English language variants Australian and Newzealand to English"
+ "Set English language variants Australian and New Zealand to English"
- if document.language == "australian" or document.language == "newzealand":
+ if document.language == "australian" or document.language == "newzealand":
document.language = "english"
- i = find_token(document.header, "\\language", 0)
- if i != -1:
- document.header[i] = "\\language english"
- j = 0
- while True:
- j = find_token(document.body, "\\lang australian", j)
+ i = find_token(document.header, "\\language", 0)
+ if i != -1:
+ document.header[i] = "\\language english"
+ j = 0
+ while True:
+ j = find_token(document.body, "\\lang australian", j)
if j == -1:
j = find_token(document.body, "\\lang newzealand", 0)
if j == -1:
else:
document.body[j] = document.body[j].replace("\\lang newzealand", "\\lang english")
else:
- document.body[j] = document.body[j].replace("\\lang australian", "\\lang english")
+ document.body[j] = document.body[j].replace("\\lang australian", "\\lang english")
j += 1
if get_option_value(document.body[begin_row], 'endlastfoot') == 'true':
document.body[begin_row] = set_option_value(document.body[begin_row], 'endlastfoot', 'false')
begin_row = end_row
- # since there could be a tabular inside this one, we
+ # since there could be a tabular inside this one, we
# cannot jump to end.
begin_table += 1
'begin{%s}' % (latex_name),
'\\end_layout', '', '\\begin_layout Plain Layout', '']
- while 1:
+ while True:
i = find_token(document.body, "\\begin_layout %s" % (layout_name), i)
if i == -1:
return
continue
# delete all line breaks insets (there are no other insets)
l = i
- while 1:
+ while True:
n = find_token(document.body, "\\begin_inset Newline newline", l, j)
if n == -1:
n = find_token(document.body, "\\begin_inset Newline linebreak", l, j)
def revert_tipa(document):
" Revert native TIPA insets to mathed or ERT. "
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset IPA", i)
if i == -1:
return
put_cmd_in_ert("\\end{turn}")
document.body[i + 4 : i + 4] = \
put_cmd_in_ert("\\begin{turn}{" + value + "}")
-
+
i += 1
-
+
finally:
if load_rotating:
- add_to_preamble(document, ["\\@ifundefined{turnbox}{\usepackage{rotating}}{}"])
+ add_to_preamble(document, ["\\@ifundefined{turnbox}{\\usepackage{rotating}}{}"])
def convert_cell_rotation(document):
rgx = re.compile(r'rotate="[^"]+?"')
# convert "true" to "90"
document.body[i] = rgx.sub('rotate="90"', document.body[i])
-
+
i += 1
put_cmd_in_ert("\\end{turn}")
document.body[i - 2 : i - 2] = \
put_cmd_in_ert("\\begin{turn}{" + value + "}")
-
+
i += 1
-
+
finally:
if load_rotating:
- add_to_preamble(document, ["\\@ifundefined{turnbox}{\usepackage{rotating}}{}"])
+ add_to_preamble(document, ["\\@ifundefined{turnbox}{\\usepackage{rotating}}{}"])
def convert_table_rotation(document):
rgx = re.compile(r'rotate="[^"]+?"')
# convert "true" to "90"
document.body[i] = rgx.sub('rotate="90"', document.body[i])
-
+
i += 1
def revert_ancientgreek(document):
- "Set the document language for ancientgreek to greek"
+ "Set the document language for ancientgreek to greek"
- if document.language == "ancientgreek":
+ if document.language == "ancientgreek":
document.language = "greek"
- i = find_token(document.header, "\\language", 0)
- if i != -1:
- document.header[i] = "\\language greek"
- j = 0
- while True:
- j = find_token(document.body, "\\lang ancientgreek", j)
+ i = find_token(document.header, "\\language", 0)
+ if i != -1:
+ document.header[i] = "\\language greek"
+ j = 0
+ while True:
+ j = find_token(document.body, "\\lang ancientgreek", j)
if j == -1:
return
else:
- document.body[j] = document.body[j].replace("\\lang ancientgreek", "\\lang greek")
+ document.body[j] = document.body[j].replace("\\lang ancientgreek", "\\lang greek")
j += 1
def revert_languages(document):
- "Set the document language for new supported languages to English"
+ "Set the document language for new supported languages to English"
languages = [
"coptic", "divehi", "hindi", "kurmanji", "lao", "marathi", "occitan", "sanskrit",
for n in range(len(languages)):
if document.language == languages[n]:
document.language = "english"
- i = find_token(document.header, "\\language", 0)
- if i != -1:
- document.header[i] = "\\language english"
+ i = find_token(document.header, "\\language", 0)
+ if i != -1:
+ document.header[i] = "\\language english"
j = 0
- while j < len(document.body):
+ while j < len(document.body):
j = find_token(document.body, "\\lang " + languages[n], j)
if j != -1:
document.body[j] = document.body[j].replace("\\lang " + languages[n], "\\lang english")
def convert_armenian(document):
- "Use polyglossia and thus non-TeX fonts for Armenian"
+ "Use polyglossia and thus non-TeX fonts for Armenian"
- if document.language == "armenian":
- i = find_token(document.header, "\\use_non_tex_fonts", 0)
- if i != -1:
- document.header[i] = "\\use_non_tex_fonts true"
+ if document.language == "armenian":
+ i = find_token(document.header, "\\use_non_tex_fonts", 0)
+ if i != -1:
+ document.header[i] = "\\use_non_tex_fonts true"
def revert_armenian(document):
- "Use ArmTeX and thus TeX fonts for Armenian"
+ "Use ArmTeX and thus TeX fonts for Armenian"
- if document.language == "armenian":
- i = find_token(document.header, "\\use_non_tex_fonts", 0)
- if i != -1:
- document.header[i] = "\\use_non_tex_fonts false"
+ if document.language == "armenian":
+ i = find_token(document.header, "\\use_non_tex_fonts", 0)
+ if i != -1:
+ document.header[i] = "\\use_non_tex_fonts false"
def revert_libertine(document):
- " Revert native libertine font definition to LaTeX "
+ " Revert native libertine font definition to LaTeX "
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+ if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
i = find_token(document.header, "\\font_roman libertine", 0)
if i != -1:
osf = False
def revert_txtt(document):
- " Revert native txtt font definition to LaTeX "
+ " Revert native txtt font definition to LaTeX "
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+ if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
i = find_token(document.header, "\\font_typewriter txtt", 0)
if i != -1:
preamble = "\\renewcommand{\\ttdefault}{txtt}"
def revert_mathdesign(document):
- " Revert native mathdesign font definition to LaTeX "
+ " Revert native mathdesign font definition to LaTeX "
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+ if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
mathdesign_dict = {
"mdbch": "charter",
"mdput": "utopia",
def revert_texgyre(document):
- " Revert native TeXGyre font definition to LaTeX "
+ " Revert native TeXGyre font definition to LaTeX "
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+ if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
texgyre_fonts = ["tgadventor", "tgbonum", "tgchorus", "tgcursor", \
"tgheros", "tgpagella", "tgschola", "tgtermes"]
i = find_token(document.header, "\\font_roman", 0)
i = end
continue
substi = ["\\begin_inset ERT", "status collapsed", "",
- "\\begin_layout Plain Layout", "", "", "\\backslash",
+ "\\begin_layout Plain Layout", "", "", "\\backslash",
decotype + "{", "\\end_layout", "", "\\end_inset"]
substj = ["\\size default", "", "\\begin_inset ERT", "status collapsed", "",
"\\begin_layout Plain Layout", "", "}", "\\end_layout", "", "\\end_inset"]
def revert_minionpro(document):
- " Revert native MinionPro font definition to LaTeX "
+ " Revert native MinionPro font definition to LaTeX "
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+ if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
i = find_token(document.header, "\\font_roman minionpro", 0)
if i != -1:
osf = False
def revert_mathfonts(document):
- " Revert native math font definitions to LaTeX "
+ " Revert native math font definitions to LaTeX "
i = find_token(document.header, "\\font_math", 0)
if i == -1:
return
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+ if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
val = get_value(document.header, "\\font_math", i)
if val == "eulervm":
add_to_preamble(document, "\\usepackage{eulervm}")
def revert_mdnomath(document):
- " Revert mathdesign and fourier without math "
+ " Revert mathdesign and fourier without math "
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+ if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
mathdesign_dict = {
"md-charter": "mdbch",
"md-utopia": "mdput",
def convert_mdnomath(document):
- " Change mathdesign font name "
+ " Change mathdesign font name "
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+ if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
mathdesign_dict = {
"mdbch": "md-charter",
"mdput": "md-utopia",
def revert_newtxmath(document):
- " Revert native newtxmath definitions to LaTeX "
+ " Revert native newtxmath definitions to LaTeX "
i = find_token(document.header, "\\font_math", 0)
if i == -1:
return
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+ if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
val = get_value(document.header, "\\font_math", i)
mathfont_dict = {
"libertine-ntxm": "\\usepackage[libertine]{newtxmath}",
def revert_biolinum(document):
- " Revert native biolinum font definition to LaTeX "
+ " Revert native biolinum font definition to LaTeX "
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+ if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
i = find_token(document.header, "\\font_sans biolinum", 0)
if i != -1:
osf = False
document.body[i] = "\\begin_inset Argument 999"
i += 1
continue
-
+
# Find containing paragraph layout
parent = get_containing_layout(document.body, i)
if parent == False:
def convert_literate(document):
" Convert Literate document to new format"
- i = find_token(document.header, "\\textclass", 0)
+ i = find_token(document.header, "\\textclass", 0)
if (i != -1) and "literate-" in document.header[i]:
document.textclass = document.header[i].replace("\\textclass literate-", "")
j = find_token(document.header, "\\begin_modules", 0)
def revert_garamondx_newtxmath(document):
- " Revert native garamond newtxmath definition to LaTeX "
+ " Revert native garamond newtxmath definition to LaTeX "
i = find_token(document.header, "\\font_math", 0)
if i == -1:
return
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+ if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
val = get_value(document.header, "\\font_math", i)
if val == "garamondx-ntxm":
add_to_preamble(document, "\\usepackage[garamondx]{newtxmath}")
def revert_garamondx(document):
- " Revert native garamond font definition to LaTeX "
+ " Revert native garamond font definition to LaTeX "
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+ if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
i = find_token(document.header, "\\font_roman garamondx", 0)
if i != -1:
osf = False
def convert_beamerargs(document):
" Converts beamer arguments to new layout "
-
+
beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
if document.textclass not in beamer_classes:
return
beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
if document.textclass not in beamer_classes:
return
-
+
i = 0
while True:
i = find_token(document.body, "\\begin_layout AgainFrame", i)
def convert_corollary_args(document):
" Converts beamer corrolary-style ERT arguments native InsetArgs "
-
+
beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
if document.textclass not in beamer_classes:
return
-
+
corollary_layouts = ["Corollary", "Definition", "Definitions", "Example", "Examples", "Fact", "Proof", "Theorem"]
for lay in corollary_layouts:
i = 0
def convert_quote_args(document):
" Converts beamer quote style ERT args to native InsetArgs "
-
+
beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
if document.textclass not in beamer_classes:
return
-
+
quote_layouts = ["Uncover", "Only", "Quotation", "Quote", "Verse"]
for lay in quote_layouts:
i = 0
def revert_beamerargs(document):
" Reverts beamer arguments to old layout "
-
+
beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
if document.textclass not in beamer_classes:
return
del document.body[p : endInset + 1]
subst = put_cmd_in_ert("[") + content + put_cmd_in_ert("]")
document.body[realparbeg : realparbeg] = subst
-
+
i = realparend
def revert_beamerargs2(document):
" Reverts beamer arguments to old layout, step 2 "
-
+
beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
if document.textclass not in beamer_classes:
return
if m:
argnr = m.group(1)
if argnr == "2":
- document.body[p] = "\\begin_inset Argument 1"
+ document.body[p] = "\\begin_inset Argument 1"
if layoutname in corollary_layouts:
m = rx.match(document.body[p])
if m:
def revert_beamerargs3(document):
" Reverts beamer arguments to old layout, step 3 "
-
+
beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
if document.textclass not in beamer_classes:
return
def revert_beamerflex(document):
" Reverts beamer Flex insets "
-
+
beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
if document.textclass not in beamer_classes:
return
document.body[i : beginPlain + 1] = pre
post = put_cmd_in_ert("}")
document.body[z - 2 : z + 1] = post
-
+
i += 1
def revert_beamerblocks(document):
" Reverts beamer block arguments to ERT "
-
+
beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
if document.textclass not in beamer_classes:
return
def convert_beamerblocks(document):
" Converts beamer block ERT args to native InsetArgs "
-
+
beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
if document.textclass not in beamer_classes:
return
-
+
blocks = ["Block", "ExampleBlock", "AlertBlock"]
for lay in blocks:
i = 0
document.body[ertcontlastline : ertcontlastline + 1] = [
document.body[ertcontlastline], '\\end_layout', '', '\\end_inset']
document.body[ertcontdivline : ertcontdivlinetwo + 1] = [document.body[ertcontdivline][:tok],
- '\\end_layout', '', '\\end_inset', '', '\\end_layout', '',
+ '\\end_layout', '', '\\end_inset', '', '\\end_layout', '',
'\\end_inset', '', '', '\\begin_inset Argument 2',
'status collapsed', '', '\\begin_layout Plain Layout',
'\\begin_inset ERT', '', 'status open' '', '\\begin_layout Plain Layout',
document.body[ertcontlastline : ertcontlastline + 1] = [
document.body[ertcontlastline], '\\end_layout', '', '\\end_inset']
document.body[ertcontdivline : ertcontdivlinetwo + 1] = [document.body[ertcontdivline][:tok],
- '\\end_layout', '', '\\end_inset', '', '\\end_layout', '',
+ '\\end_layout', '', '\\end_inset', '', '\\end_layout', '',
'\\end_inset', '', '', '\\begin_inset Argument 1',
'status collapsed', '', '\\begin_layout Plain Layout',
'\\begin_inset ERT', '', 'status open' '', '\\begin_layout Plain Layout',
def convert_overprint(document):
" Convert old beamer overprint layouts to ERT "
-
+
beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
if document.textclass not in beamer_classes:
return
# Remove arg inset
del document.body[argbeg : argend + 1]
subst += put_cmd_in_ert("[") + content + put_cmd_in_ert("]")
-
+
endseq = endseq - len(document.body[i : i])
document.body[i : i] = subst + ["\\end_layout"]
endseq += len(subst)
-
+
for p in range(i, endseq):
if document.body[p] == "\\begin_layout Overprint":
document.body[p] = "\\begin_layout Standard"
def revert_overprint(document):
" Revert old beamer overprint layouts to ERT "
-
+
beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
if document.textclass not in beamer_classes:
return
# Remove arg inset
del document.body[argbeg : argend + 1]
subst += put_cmd_in_ert("[") + content + put_cmd_in_ert("]")
-
+
endseq = endseq - len(document.body[i : i])
document.body[i : i] = subst + ["\\end_layout"]
endseq += len(subst)
-
+
p = i
while True:
if p >= endseq:
def revert_frametitle(document):
" Reverts beamer frametitle layout to ERT "
-
+
beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
if document.textclass not in beamer_classes:
return
# Remove arg inset
del document.body[p : endInset + 1]
subst += put_cmd_in_ert("[") + content + put_cmd_in_ert("]")
-
+
subst += put_cmd_in_ert("{")
document.body[i : i + 1] = subst
i = endlay
def convert_epigraph(document):
" Converts memoir epigraph to new syntax "
-
+
if document.textclass != "memoir":
return
endlay += len(begsubst) + len(endsubst)
endlay = endlay - len(document.body[ert : endInset + 1])
del document.body[ert : endInset + 1]
-
+
i = endlay
def revert_epigraph(document):
" Reverts memoir epigraph argument to ERT "
-
+
if document.textclass != "memoir":
return
subst += put_cmd_in_ert("}{") + content
else:
subst += put_cmd_in_ert("}{")
-
+
document.body[j : j] = subst + document.body[j : j]
i = endlay
def convert_captioninsets(document):
" Converts caption insets to new syntax "
-
+
i = 0
while True:
i = find_token(document.body, "\\begin_inset Caption", i)
def revert_captioninsets(document):
" Reverts caption insets to old syntax "
-
+
i = 0
while True:
i = find_token(document.body, "\\begin_inset Caption Standard", i)
def revert_captionlayouts(document):
" Revert caption insets to caption layouts. "
-
+
caption_dict = {
"Above" : "Captionabove",
"Below" : "Captionbelow",
"Centered" : "CenteredCaption",
"Bicaption" : "Bicaption",
}
-
+
i = 0
rx = re.compile(r'^\\begin_inset Caption (\S+)$')
while True:
if val not in list(caption_dict.keys()):
i += 1
continue
-
+
# We either need to delete the previous \begin_layout line, or we
# need to end the previous layout if this inset is not in the first
# position of the paragraph.
def revert_fragileframe(document):
" Reverts beamer FragileFrame layout to ERT "
-
+
beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
if document.textclass not in beamer_classes:
return
subst += put_cmd_in_ert("{") + content + put_cmd_in_ert("}")
elif p == 3:
subst += put_cmd_in_ert("[fragile]")
-
+
document.body[i : i + 1] = subst
i = j
def revert_newframes(document):
" Reverts beamer Frame and PlainFrame layouts to old forms "
-
+
beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
if document.textclass not in beamer_classes:
return
# Remove arg inset
del document.body[arg : endInset + 1]
subst += content
-
+
document.body[i : i + 1] = subst
i = j
def revert_kurier_fonts(document):
" Revert kurier font definition to LaTeX "
-
+
i = find_token(document.header, "\\font_math", 0)
if i != -1:
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+ if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
val = get_value(document.header, "\\font_math", i)
if val == "kurier-math":
add_to_preamble(document, "\\let\\Myrmdefault\\rmdefault\n" \
"\\usepackage[math]{kurier}\n" \
"\\renewcommand{\\rmdefault}{\\Myrmdefault}")
document.header[i] = "\\font_math auto"
-
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+
+ if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
kurier_fonts = ["kurier", "kurierc", "kurierl", "kurierlc"]
k = find_token(document.header, "\\font_sans kurier", 0)
if k != -1:
def revert_iwona_fonts(document):
" Revert iwona font definition to LaTeX "
-
+
i = find_token(document.header, "\\font_math", 0)
if i != -1:
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+ if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
val = get_value(document.header, "\\font_math", i)
if val == "iwona-math":
add_to_preamble(document, "\\let\\Myrmdefault\\rmdefault\n" \
"\\usepackage[math]{iwona}\n" \
"\\renewcommand{\\rmdefault}{\\Myrmdefault}")
document.header[i] = "\\font_math auto"
-
- if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
+
+ if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
iwona_fonts = ["iwona", "iwonac", "iwonal", "iwonalc"]
k = find_token(document.header, "\\font_sans iwona", 0)
if k != -1:
def revert_new_libertines(document):
" Revert new libertine font definition to LaTeX "
-
+
if find_token(document.header, "\\use_non_tex_fonts true", 0) != -1:
return
preamble += "{libertineMono-type1}"
add_to_preamble(document, [preamble])
document.header[i] = "\\font_typewriter default"
-
+
k = find_token(document.header, "\\font_sans biolinum", 0)
if k != -1:
preamble = "\\usepackage"
def convert_lyxframes(document):
" Converts old beamer frames to new style "
-
+
beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
if document.textclass not in beamer_classes:
return
-
+
framebeg = ["BeginFrame", "BeginPlainFrame"]
frameend = ["Frame", "PlainFrame", "EndFrame", "BeginFrame", "BeginPlainFrame", "AgainFrame",
"Section", "Section*", "Subsection", "Subsection*", "Subsubsection", "Subsubsection*"]
def remove_endframes(document):
" Remove deprecated beamer endframes "
-
+
beamer_classes = ["beamer", "article-beamer", "scrarticle-beamer"]
if document.textclass not in beamer_classes:
return
-
+
i = 0
while True:
i = find_token_exact(document.body, "\\begin_layout EndFrame", i)
def revert_powerdot_flexes(document):
" Reverts powerdot flex insets "
-
+
if document.textclass != "powerdot":
return
z += len(pre)
document.body[i : beginPlain + 1] = pre
post = put_cmd_in_ert("}")
- document.body[z - 2 : z + 1] = post
+ document.body[z - 2 : z + 1] = post
i += 1
def revert_powerdot_pause(document):
" Reverts powerdot pause layout to ERT "
-
+
if document.textclass != "powerdot":
return
# Remove arg inset
del document.body[p : endInset + 1]
subst += put_cmd_in_ert("[") + content + put_cmd_in_ert("]")
-
+
document.body[i : i + 1] = subst
i = endlay
def revert_powerdot_itemargs(document):
" Reverts powerdot item arguments to ERT "
-
+
if document.textclass != "powerdot":
return
del document.body[i:j+1]
subst = put_cmd_in_ert("<") + content + put_cmd_in_ert(">")
document.body[realparbeg : realparbeg] = subst
-
+
i = realparend
# Remove arg inset
del document.body[p : endInset + 1]
subst += put_cmd_in_ert("{") + content + put_cmd_in_ert("}")
-
+
subst += put_cmd_in_ert("{")
document.body[i : i + 1] = subst
i = endlay
def revert_starred_caption(document):
" Reverts unnumbered longtable caption insets "
-
+
i = 0
while True:
i = find_token(document.body, "\\begin_inset Caption LongTableNoNumber", i)
def revert_tibetan(document):
- "Set the document language for Tibetan to English"
+ "Set the document language for Tibetan to English"
if document.language == "tibetan":
document.language = "english"
- i = find_token(document.header, "\\language", 0)
- if i != -1:
- document.header[i] = "\\language english"
+ i = find_token(document.header, "\\language", 0)
+ if i != -1:
+ document.header[i] = "\\language english"
j = 0
- while j < len(document.body):
+ while j < len(document.body):
j = find_token(document.body, "\\lang tibetan", j)
if j != -1:
document.body[j] = document.body[j].replace("\\lang tibetan", "\\lang english")
# there is no point continuing, as we will run into the same error again.
return
this_chunk = "".join(document.body[i + 1:j])
-
+
# there may be empty lines between chunks
# we just skip them.
if not chunk_started:
if this_chunk != "":
# new chunk starts
chunk_started = True
-
+
if chunk_started:
contents.append(document.body[i + 1:j])
# the first par (separated from the options by a newline).
# We collect such stuff to re-insert it later.
postoptstuff = []
-
+
match = first_re.search(optarg)
if match:
optarg = match.groups()[0]
# replace old content with new content
document.body[lstart : lend + 1] = newlines
i = lstart + len(newlines)
-
+
##
# Conversion hub
# -*- coding: utf-8 -*-
# This file is part of lyx2lyx
-# -*- coding: utf-8 -*-
# Copyright (C) 2015 The LyX team
#
# This program is free software; you can redistribute it and/or
if nolastopt == False:
document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("}{")
else:
- document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("}")
+ document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("}")
del(document.body[lineArg : beginPlain + 1])
wasOpt = False
else:
}
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_deeper", i)
if i == -1:
break
i = i + 1
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\align", i)
if i == -1:
break
j = find_token_backwards(document.body, "\\end_layout", i-1)
if j != -1:
+ # Very old LyX files do not have Plain Layout in insets (but Standard).
+ # So we additionally check here if there is no inset boundary
+ # between the previous layout and this one.
+ n = find_token(document.body, "\\end_inset", j, lay[1])
+ if n != -1:
+ i = i + 1
+ continue
lay = get_containing_layout(document.body, j-1)
if lay != False and lay[0] == "Standard" \
and find_token(document.body, "\\align", lay[1], lay[2]) == -1 \
regexp = re.compile(r'^\\begin_layout (?:(-*)|(\s*))(Separator|EndOfSlide)(?:(-*)|(\s*))$', re.IGNORECASE)
i = 0
- while 1:
+ while True:
i = find_re(document.body, regexp, i)
if i == -1:
return
"", "\\end_inset", ""]
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset Separator", i)
if i == -1:
return
"""
parbreakinset = "\\begin_inset Separator parbreak"
i = 0
- while 1:
+ while True:
i = find_token(document.body, parbreakinset, i)
if i == -1:
return
Revert latexpar separators to parbreak separators.
"""
i = 0
- while 1:
+ while True:
i = find_token(document.body, "\\begin_inset Separator latexpar", i)
if i == -1:
return
i = 0
while i < len(document.body):
words = document.body[i].split()
- if len(words) > 1 and words[0] == "\\begin_inset" and \
- words[1] in ["CommandInset", "ERT", "External", "Formula", "FormulaMacro", "Graphics", "IPA", "listings"]:
+ if (len(words) > 1 and words[0] == "\\begin_inset"
+ and (words[1] in ["CommandInset", "ERT", "External", "Formula",
+ "FormulaMacro", "Graphics", "IPA", "listings"]
+ or ' '.join(words[1:]) == "Flex Code")):
# must not replace anything in insets that store LaTeX contents in .lyx files
- # (math and command insets withut overridden read() and write() methods
+ # (math and command insets without overridden read() and write() methods
# filtering out IPA makes Text::readParToken() more simple
# skip ERT as well since it is not needed there
+ # Flex Code is logical markup, typically rendered as typewriter
j = find_end_of_inset(document.body, i)
if j == -1:
document.warning("Malformed LyX document: Can't find end of " + words[1] + " inset at line " + str(i))
else:
i = j
continue
+ if document.body[i] == "\\begin_layout LyX-Code":
+ j = find_end_of_layout(document.body, i)
+ if j == -1:
+ document.warning("Malformed LyX document: "
+ "Can't find end of %s layout at line %d" % (words[1],i))
+ i += 1
+ else:
+ i = j
+ continue
+
if len(words) > 0 and words[0] in ["\\leftindent", "\\paragraph_spacing", "\\align", "\\labelwidthstring"]:
# skip paragraph parameters (bug 10243)
i += 1
def revert_dashes(document):
"convert \\twohyphens and \\threehyphens to -- and ---"
+ # eventually remove preamble code from 2.3->2.2 conversion:
+ for i, line in enumerate(document.preamble):
+ if i > 1 and line == r'\renewcommand{\textemdash}{---}':
+ if (document.preamble[i-1] == r'\renewcommand{\textendash}{--}'
+ and document.preamble[i-2] == '% Added by lyx2lyx'):
+ del document.preamble[i-2:i+1]
i = 0
while i < len(document.body):
words = document.body[i].split()
else:
i = j
continue
- for key, value in specialchars.iteritems():
+ for key, value in specialchars.items():
if forward:
document.body[i] = document.body[i].replace("\\SpecialChar " + key, "\\SpecialChar " + value)
document.body[i] = document.body[i].replace("\\SpecialCharNoPassThru " + key, "\\SpecialCharNoPassThru " + value)
if i == -1:
document.warning("Malformed LyX document: No \\textclass!!")
return
- if document.dir == "":
- origin = "stdin"
+ if document.dir == u'':
+ origin = u'stdin'
else:
- relpath = ''
- if document.systemlyxdir and document.systemlyxdir != '':
+ relpath = u''
+ if document.systemlyxdir and document.systemlyxdir != u'':
try:
if os.path.isabs(document.dir):
absdir = os.path.normpath(document.dir)
else:
abssys = os.path.normpath(os.path.abspath(document.systemlyxdir))
relpath = os.path.relpath(absdir, abssys)
- if relpath.find('..') == 0:
- relpath = ''
+ if relpath.find(u'..') == 0:
+ relpath = u''
except:
- relpath = ''
- if relpath == '':
- origin = document.dir.replace('\\', '/') + '/'
+ relpath = u''
+ if relpath == u'':
+ origin = document.dir.replace(u'\\', u'/') + u'/'
else:
- origin = os.path.join("/systemlyxdir", relpath).replace('\\', '/') + '/'
- origin = unicode(origin, sys.getfilesystemencoding())
+ origin = os.path.join(u"/systemlyxdir", relpath).replace(u'\\', u'/') + u'/'
document.header[i:i] = ["\\origin " + origin]
j = find_token(document.body, "\\color", i + 1)
k = find_token(document.body, "\\end_layout", i + 1)
if j == -1 and k != -1:
- j = k +1
+ j = k +1
# output TeX code
# first output the closing brace
if k < j:
def revert_colorbox(document):
" outputs color settings for boxes as TeX code "
-
+
i = 0
defaultframecolor = "black"
defaultbackcolor = "none"
def revert_moderncv_1(document):
" Reverts the new inset of moderncv to TeX-code in preamble "
-
+
if document.textclass != "moderncv":
return
i = 0
def revert_moderncv_2(document):
" Reverts the phone inset of moderncv to the obsoleted mobile or fax "
-
+
if document.textclass != "moderncv":
return
i = 0
def revert_achemso(document):
" Reverts the flex inset Latin to TeX code "
-
+
if document.textclass != "achemso":
return
i = 0
--- /dev/null
+# -*- coding: utf-8 -*-
+# This file is part of lyx2lyx
+# Copyright (C) 2016 The LyX team
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+""" Convert files to the file format generated by lyx 2.3"""
+
+import re, string
+import unicodedata
+import sys, os
+
+# Uncomment only what you need to import, please.
+
+from parser_tools import find_end_of, find_token_backwards, find_end_of_layout, \
+ find_token, find_end_of_inset, get_value, get_bool_value, \
+ get_containing_layout, get_quoted_value, del_token, find_re
+# find_tokens, find_token_exact, is_in_inset, \
+# check_token, get_option_value
+
+from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert, revert_font_attrs, \
+ insert_to_preamble
+# get_ert, lyx2latex, \
+# lyx2verbatim, length_in_bp, convert_info_insets
+# latex_length, revert_flex_inset, hex2ratio, str2bool
+
+####################################################################
+# Private helper functions
+
+
+
+###############################################################################
+###
+### Conversion and reversion routines
+###
+###############################################################################
+
+def convert_microtype(document):
+    " Add the native \\use_microtype header setting. "
+    # Anchor the new setting right after \font_tt_scale; if that header
+    # line is missing the document is malformed, so fall back to
+    # appending at the end of the header.
+    i = find_token(document.header, "\\font_tt_scale" , 0)
+    if i == -1:
+        document.warning("Malformed LyX document: Can't find \\font_tt_scale.")
+        i = len(document.header) - 1
+
+    # An explicit \usepackage{microtype} in the user preamble becomes the
+    # native setting; drop the now-redundant preamble line in that case.
+    j = find_token(document.preamble, "\\usepackage{microtype}", 0)
+    if j == -1:
+        document.header.insert(i + 1, "\\use_microtype false")
+    else:
+        document.header.insert(i + 1, "\\use_microtype true")
+        del document.preamble[j]
+
+
+def revert_microtype(document):
+    " Remove microtype settings. "
+    i = find_token(document.header, "\\use_microtype", 0)
+    if i == -1:
+        return
+    use_microtype = get_bool_value(document.header, "\\use_microtype" , i)
+    # Older formats do not know \use_microtype: delete the header line and,
+    # when the feature was enabled, re-create its effect via the preamble.
+    del document.header[i]
+    if use_microtype:
+        add_to_preamble(document, ["\\usepackage{microtype}"])
+
+
+def convert_dateinset(document):
+    ' Convert date external inset to ERT '
+    i = 0
+    while True:
+        i = find_token(document.body, "\\begin_inset External", i)
+        if i == -1:
+            return
+        j = find_end_of_inset(document.body, i)
+        if j == -1:
+            document.warning("Malformed lyx document: Missing '\\end_inset' in convert_dateinset.")
+            i += 1
+            continue
+        # Only "Date" external templates are affected: the whole inset is
+        # replaced by an ERT inset that emits \today directly.
+        if get_value(document.body, 'template', i, j) == "Date":
+            document.body[i : j + 1] = put_cmd_in_ert("\\today ")
+        # (the trailing continue below is redundant but harmless)
+        i += 1
+        continue
+
+
+def convert_inputenc(document):
+    " Replace no longer supported input encoding settings. "
+    # "\\inputenc" is a prefix of the "\\inputencoding <enc>" header line;
+    # assumes find_token matches by line-start prefix -- TODO confirm.
+    i = find_token(document.header, "\\inputenc", 0)
+    if i == -1:
+        return
+    # NOTE(review): presumably "pt254" was a never-valid alias that is
+    # normalized to the supported "pt154" encoding -- verify intent.
+    if get_value(document.header, "\\inputencoding", i) == "pt254":
+        document.header[i] = "\\inputencoding pt154"
+
+
+def convert_ibranches(document):
+    ' Add "inverted 0" to branch insets'
+    # The newer format stores an explicit inverted flag on every branch
+    # inset; older documents implicitly had non-inverted branches only.
+    i = 0
+    while True:
+        i = find_token(document.body, "\\begin_inset Branch", i)
+        if i == -1:
+            return
+        document.body.insert(i + 1, "inverted 0")
+        i += 1
+
+
+def revert_ibranches(document):
+    ' Convert inverted branches to explicit anti-branches'
+    # Get list of branches
+    # Maps branch name -> selection flag (0 or 1) read from the header.
+    ourbranches = {}
+    i = 0
+    while True:
+        i = find_token(document.header, "\\branch", i)
+        if i == -1:
+            break
+        # [8:] skips the "\branch " prefix to get the branch name.
+        branch = document.header[i][8:].strip()
+        if document.header[i+1].startswith("\\selected "):
+            #document.warning(document.header[i+1])
+            #document.warning(document.header[i+1][10])
+            # [10] is the single character after "\selected ".
+            selected = int(document.header[i+1][10])
+        else:
+            document.warning("Malformed LyX document: No selection indicator for branch " + branch)
+            selected = 1
+
+        # the value tells us whether the branch is selected
+        ourbranches[document.header[i][8:].strip()] = selected
+        i += 1
+
+    # Figure out what inverted branches, if any, have been used
+    # and convert them to "Anti-OldBranch"
+    # Maps original branch name -> generated anti-branch name.
+    ibranches = {}
+    i = 0
+    while True:
+        i = find_token(document.body, "\\begin_inset Branch", i)
+        if i == -1:
+            break
+        if not document.body[i+1].startswith("inverted "):
+            document.warning("Malformed LyX document: Missing 'inverted' tag!")
+            i += 1
+            continue
+        # [9] is the single flag character after "inverted ".
+        inverted = document.body[i+1][9]
+        #document.warning(document.body[i+1])
+
+        if inverted == "1":
+            # [20:] skips the "\begin_inset Branch " prefix.
+            branch = document.body[i][20:].strip()
+            #document.warning(branch)
+            if not branch in ibranches:
+                antibranch = "Anti-" + branch
+                # NOTE(review): uniqueness is checked against the dict KEYS
+                # (original names), not the generated values -- confirm that
+                # a clash between anti-branch names cannot occur.
+                while antibranch in ibranches:
+                    antibranch = "x" + antibranch
+                ibranches[branch] = antibranch
+            else:
+                antibranch = ibranches[branch]
+            #document.warning(antibranch)
+            document.body[i] = "\\begin_inset Branch " + antibranch
+
+        # remove "inverted" key
+        del document.body[i+1]
+        i += 1
+
+    # now we need to add the new branches to the header
+    for old, new in ibranches.items():
+        i = find_token(document.header, "\\branch " + old, 0)
+        if i == -1:
+            document.warning("Can't find branch %s even though we found it before!" % (old))
+            continue
+        j = find_token(document.header, "\\end_branch", i)
+        if j == -1:
+            document.warning("Malformed LyX document! Can't find end of branch " + old)
+            continue
+        # ourbranches[old] - 1 inverts the selection status of the old branch
+        # NOTE(review): for selected == 1 this writes "\selected 0", but for
+        # selected == 0 it writes "\selected -1" -- confirm older readers
+        # accept -1 as "selected".
+        lines = ["\\branch " + new,
+                 "\\selected " + str(ourbranches[old] - 1)]
+        # these are the old lines telling us color, etc.
+        lines += document.header[i+2 : j+1]
+        document.header[i:i] = lines
+
+
+def revert_beamer_article_styles(document):
+    " Include (scr)article styles in beamer article "
+
+    beamer_articles = ["article-beamer", "scrarticle-beamer"]
+    if document.textclass not in beamer_articles:
+        return
+
+    # Pick the base article layout matching the textclass.
+    inclusion = "article.layout"
+    if document.textclass == "scrarticle-beamer":
+        inclusion = "scrartcl.layout"
+
+    # Reuse an existing local layout block, or create an empty one just
+    # before the \language header line.
+    i = find_token(document.header, "\\begin_local_layout", 0)
+    if i == -1:
+        k = find_token(document.header, "\\language", 0)
+        if k == -1:
+            # this should not happen
+            document.warning("Malformed LyX document! No \\language header found!")
+            return
+        document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
+        i = k - 1
+
+    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
+    if j == -1:
+        # this should not happen
+        document.warning("Malformed LyX document: Can't find end of local layout!")
+        return
+
+    # Splice the emulation layout inside the local layout block.  The
+    # ### marker lines let convert_beamer_article_styles remove exactly
+    # this insertion later.  Note: "\makebeamertitle" relies on "\m" not
+    # being a Python escape, so a literal backslash is still emitted.
+    document.header[i+1 : i+1] = [
+        "### Inserted by lyx2lyx (more [scr]article styles) ###",
+        "Input " + inclusion,
+        "Input beamer.layout",
+        "Provides geometry 0",
+        "Provides hyperref 0",
+        "DefaultFont",
+        "     Family                Roman",
+        "     Series                Medium",
+        "     Shape                 Up",
+        "     Size                  Normal",
+        "     Color                 None",
+        "EndFont",
+        "Preamble",
+        "     \\usepackage{beamerarticle,pgf}",
+        "     % this default might be overridden by plain title style",
+        "     \\newcommand\makebeamertitle{\\frame{\\maketitle}}%",
+        "     \\AtBeginDocument{",
+        "       \\let\\origtableofcontents=\\tableofcontents",
+        "       \\def\\tableofcontents{\\@ifnextchar[{\\origtableofcontents}{\\gobbletableofcontents}}",
+        "       \\def\\gobbletableofcontents#1{\\origtableofcontents}",
+        "     }",
+        "EndPreamble",
+        "### End of insertion by lyx2lyx (more [scr]article styles) ###"
+    ]
+
+
+def convert_beamer_article_styles(document):
+    " Remove included (scr)article styles in beamer article "
+
+    beamer_articles = ["article-beamer", "scrarticle-beamer"]
+    if document.textclass not in beamer_articles:
+        return
+
+    i = find_token(document.header, "\\begin_local_layout", 0)
+    if i == -1:
+        return
+
+    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
+    if j == -1:
+        # this should not happen
+        document.warning("Malformed LyX document: Can't find end of local layout!")
+        return
+
+    # Look for the ### marker pair written by the reversion routine; only
+    # the span between (and including) the markers is lyx2lyx's own.
+    k = find_token(document.header, "### Inserted by lyx2lyx (more [scr]article styles) ###", i, j)
+    if k != -1:
+        l = find_token(document.header, "### End of insertion by lyx2lyx (more [scr]article styles) ###", i, j)
+        if l == -1:
+            # this should not happen
+            document.warning("End of lyx2lyx local layout insertion not found!")
+            return
+
+        if k == i + 1 and l == j - 1:
+            # that was all the local layout there was
+            # so remove the enclosing \begin/\end_local_layout pair too
+            document.header[i : j + 1] = []
+        else:
+            document.header[k : l + 1] = []
+
+
+def revert_bosnian(document):
+    "Set the document language to English but assure Bosnian output"
+
+    if document.language == "bosnian":
+        document.language = "english"
+        i = find_token(document.header, "\\language bosnian", 0)
+        if i != -1:
+            document.header[i] = "\\language english"
+        # Force the babel language package, which provides Bosnian via a
+        # document class option.
+        j = find_token(document.header, "\\language_package default", 0)
+        if j != -1:
+            document.header[j] = "\\language_package babel"
+        k = find_token(document.header, "\\options", 0)
+        if k != -1:
+            document.header[k] = document.header[k].replace("\\options", "\\options bosnian,")
+        else:
+            # NOTE(review): if \use_default_options is absent, l is -1 and
+            # the new \options line lands at the top of the header -- confirm.
+            l = find_token(document.header, "\\use_default_options", 0)
+            document.header.insert(l + 1, "\\options bosnian")
+
+
+def revert_friulan(document):
+    "Set the document language to English but assure Friulan output"
+
+    if document.language == "friulan":
+        document.language = "english"
+        i = find_token(document.header, "\\language friulan", 0)
+        if i != -1:
+            document.header[i] = "\\language english"
+        # Force the babel language package, which provides Friulan via a
+        # document class option.
+        j = find_token(document.header, "\\language_package default", 0)
+        if j != -1:
+            document.header[j] = "\\language_package babel"
+        k = find_token(document.header, "\\options", 0)
+        if k != -1:
+            document.header[k] = document.header[k].replace("\\options", "\\options friulan,")
+        else:
+            # NOTE(review): if \use_default_options is absent, l is -1 and
+            # the new \options line lands at the top of the header -- confirm.
+            l = find_token(document.header, "\\use_default_options", 0)
+            document.header.insert(l + 1, "\\options friulan")
+
+
+def revert_macedonian(document):
+    "Set the document language to English but assure Macedonian output"
+
+    if document.language == "macedonian":
+        document.language = "english"
+        i = find_token(document.header, "\\language macedonian", 0)
+        if i != -1:
+            document.header[i] = "\\language english"
+        # Force the babel language package, which provides Macedonian via a
+        # document class option.
+        j = find_token(document.header, "\\language_package default", 0)
+        if j != -1:
+            document.header[j] = "\\language_package babel"
+        k = find_token(document.header, "\\options", 0)
+        if k != -1:
+            document.header[k] = document.header[k].replace("\\options", "\\options macedonian,")
+        else:
+            # NOTE(review): if \use_default_options is absent, l is -1 and
+            # the new \options line lands at the top of the header -- confirm.
+            l = find_token(document.header, "\\use_default_options", 0)
+            document.header.insert(l + 1, "\\options macedonian")
+
+
def revert_piedmontese(document):
    "Set the document language to English but assure Piedmontese output"

    if document.language != "piedmontese":
        return
    document.language = "english"
    lang_line = find_token(document.header, "\\language piedmontese", 0)
    if lang_line != -1:
        document.header[lang_line] = "\\language english"
    pkg_line = find_token(document.header, "\\language_package default", 0)
    if pkg_line != -1:
        # Piedmontese is only supported via babel in older versions
        document.header[pkg_line] = "\\language_package babel"
    opts_line = find_token(document.header, "\\options", 0)
    if opts_line != -1:
        # prepend the language to the existing class options
        document.header[opts_line] = document.header[opts_line].replace("\\options", "\\options piedmontese,")
    else:
        # no class options yet: add them right after \use_default_options
        defopts_line = find_token(document.header, "\\use_default_options", 0)
        document.header.insert(defopts_line + 1, "\\options piedmontese")
+
+
def revert_romansh(document):
    "Set the document language to English but assure Romansh output"

    if document.language != "romansh":
        return
    document.language = "english"
    lang_line = find_token(document.header, "\\language romansh", 0)
    if lang_line != -1:
        document.header[lang_line] = "\\language english"
    pkg_line = find_token(document.header, "\\language_package default", 0)
    if pkg_line != -1:
        # Romansh is only supported via babel in older versions
        document.header[pkg_line] = "\\language_package babel"
    opts_line = find_token(document.header, "\\options", 0)
    if opts_line != -1:
        # prepend the language to the existing class options
        document.header[opts_line] = document.header[opts_line].replace("\\options", "\\options romansh,")
    else:
        # no class options yet: add them right after \use_default_options
        defopts_line = find_token(document.header, "\\use_default_options", 0)
        document.header.insert(defopts_line + 1, "\\options romansh")
+
+
def revert_amharic(document):
    """Set the document language to English but assure Amharic output.

    Amharic has no babel support; the preamble declares it via
    \\setotherlanguage (presumably polyglossia — confirm against the
    exporter) and an ERT chunk at the top of the body switches back.
    """

    if document.language != "amharic":
        return
    document.language = "english"
    i = find_token(document.header, "\\language amharic", 0)
    if i != -1:
        document.header[i] = "\\language english"
    j = find_token(document.header, "\\language_package default", 0)
    if j != -1:
        # keep the default language package (no babel available)
        document.header[j] = "\\language_package default"
    # Fix: spell the backslash explicitly; the former "\setotherlanguage"
    # relied on Python passing the invalid escape "\s" through unchanged
    # (a DeprecationWarning since Python 3.6).
    add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{amharic}}"])
    document.body[2 : 2] = ["\\begin_layout Standard",
                            "\\begin_inset ERT", "status open", "",
                            "\\begin_layout Plain Layout", "", "",
                            "\\backslash",
                            "resetdefaultlanguage{amharic}",
                            "\\end_layout", "", "\\end_inset", "", "",
                            "\\end_layout", ""]
+
+
def revert_asturian(document):
    """Set the document language to English but assure Asturian output.

    Asturian has no babel support; the preamble declares it via
    \\setotherlanguage and an ERT chunk at the top of the body switches back.
    """

    if document.language != "asturian":
        return
    document.language = "english"
    i = find_token(document.header, "\\language asturian", 0)
    if i != -1:
        document.header[i] = "\\language english"
    j = find_token(document.header, "\\language_package default", 0)
    if j != -1:
        # keep the default language package (no babel available)
        document.header[j] = "\\language_package default"
    # Fix: explicit double backslash instead of the invalid escape "\s"
    # (DeprecationWarning since Python 3.6); runtime string is unchanged.
    add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{asturian}}"])
    document.body[2 : 2] = ["\\begin_layout Standard",
                            "\\begin_inset ERT", "status open", "",
                            "\\begin_layout Plain Layout", "", "",
                            "\\backslash",
                            "resetdefaultlanguage{asturian}",
                            "\\end_layout", "", "\\end_inset", "", "",
                            "\\end_layout", ""]
+
+
def revert_kannada(document):
    """Set the document language to English but assure Kannada output.

    Kannada has no babel support; the preamble declares it via
    \\setotherlanguage and an ERT chunk at the top of the body switches back.
    """

    if document.language != "kannada":
        return
    document.language = "english"
    i = find_token(document.header, "\\language kannada", 0)
    if i != -1:
        document.header[i] = "\\language english"
    j = find_token(document.header, "\\language_package default", 0)
    if j != -1:
        # keep the default language package (no babel available)
        document.header[j] = "\\language_package default"
    # Fix: explicit double backslash instead of the invalid escape "\s"
    # (DeprecationWarning since Python 3.6); runtime string is unchanged.
    add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{kannada}}"])
    document.body[2 : 2] = ["\\begin_layout Standard",
                            "\\begin_inset ERT", "status open", "",
                            "\\begin_layout Plain Layout", "", "",
                            "\\backslash",
                            "resetdefaultlanguage{kannada}",
                            "\\end_layout", "", "\\end_inset", "", "",
                            "\\end_layout", ""]
+
+
def revert_khmer(document):
    """Set the document language to English but assure Khmer output.

    Khmer has no babel support; the preamble declares it via
    \\setotherlanguage and an ERT chunk at the top of the body switches back.
    """

    if document.language != "khmer":
        return
    document.language = "english"
    i = find_token(document.header, "\\language khmer", 0)
    if i != -1:
        document.header[i] = "\\language english"
    j = find_token(document.header, "\\language_package default", 0)
    if j != -1:
        # keep the default language package (no babel available)
        document.header[j] = "\\language_package default"
    # Fix: explicit double backslash instead of the invalid escape "\s"
    # (DeprecationWarning since Python 3.6); runtime string is unchanged.
    add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{khmer}}"])
    document.body[2 : 2] = ["\\begin_layout Standard",
                            "\\begin_inset ERT", "status open", "",
                            "\\begin_layout Plain Layout", "", "",
                            "\\backslash",
                            "resetdefaultlanguage{khmer}",
                            "\\end_layout", "", "\\end_inset", "", "",
                            "\\end_layout", ""]
+
+
def revert_urdu(document):
    """Set the document language to English but assure Urdu output.

    Urdu has no babel support; the preamble declares it via
    \\setotherlanguage and an ERT chunk at the top of the body switches back.
    """

    if document.language != "urdu":
        return
    document.language = "english"
    i = find_token(document.header, "\\language urdu", 0)
    if i != -1:
        document.header[i] = "\\language english"
    j = find_token(document.header, "\\language_package default", 0)
    if j != -1:
        # keep the default language package (no babel available)
        document.header[j] = "\\language_package default"
    # Fix: explicit double backslash instead of the invalid escape "\s"
    # (DeprecationWarning since Python 3.6); runtime string is unchanged.
    add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{urdu}}"])
    document.body[2 : 2] = ["\\begin_layout Standard",
                            "\\begin_inset ERT", "status open", "",
                            "\\begin_layout Plain Layout", "", "",
                            "\\backslash",
                            "resetdefaultlanguage{urdu}",
                            "\\end_layout", "", "\\end_inset", "", "",
                            "\\end_layout", ""]
+
+
def revert_syriac(document):
    """Set the document language to English but assure Syriac output.

    Syriac has no babel support; the preamble declares it via
    \\setotherlanguage and an ERT chunk at the top of the body switches back.
    """

    if document.language != "syriac":
        return
    document.language = "english"
    i = find_token(document.header, "\\language syriac", 0)
    if i != -1:
        document.header[i] = "\\language english"
    j = find_token(document.header, "\\language_package default", 0)
    if j != -1:
        # keep the default language package (no babel available)
        document.header[j] = "\\language_package default"
    # Fix: explicit double backslash instead of the invalid escape "\s"
    # (DeprecationWarning since Python 3.6); runtime string is unchanged.
    add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{syriac}}"])
    document.body[2 : 2] = ["\\begin_layout Standard",
                            "\\begin_inset ERT", "status open", "",
                            "\\begin_layout Plain Layout", "", "",
                            "\\backslash",
                            "resetdefaultlanguage{syriac}",
                            "\\end_layout", "", "\\end_inset", "", "",
                            "\\end_layout", ""]
+
+
def revert_quotes(document):
    " Revert Quote Insets in verbatim or Hebrew context to plain quotes "
    # Three passes: (1) quotes inside verbatim-like insets, (2) quotes
    # inside verbatim-like layouts, (3) quotes in Hebrew text.  In each
    # case the Quotes inset is replaced by a plain " or ' character
    # (single quote when the inset code ends in "s").

    # First handle verbatim insets
    i = 0
    j = 0
    while i < len(document.body):
        words = document.body[i].split()
        if len(words) > 1 and words[0] == "\\begin_inset" and \
           ( words[1] in ["ERT", "listings"] or ( len(words) > 2 and words[2] in ["URL", "Chunk", "Sweave", "S/R"]) ):
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of " + words[1] + " inset at line " + str(i))
                i += 1
                continue
            while True:
                k = find_token(document.body, '\\begin_inset Quotes', i, j)
                if k == -1:
                    i += 1
                    break
                l = find_end_of_inset(document.body, k)
                if l == -1:
                    document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
                    # NOTE(review): i is not advanced past k here, so a quote
                    # inset with a missing end would loop forever — confirm.
                    i = k
                    continue
                replace = "\""
                if document.body[k].endswith("s"):
                    replace = "'"
                # NOTE(review): this shrinks the body but j is not adjusted;
                # the stale bound can let the next search run past the inset
                # end — verify against upstream behaviour.
                document.body[k:l+1] = [replace]
        else:
            i += 1
            continue

    # Now verbatim layouts
    i = 0
    j = 0
    while i < len(document.body):
        words = document.body[i].split()
        if len(words) > 1 and words[0] == "\\begin_layout" and \
           words[1] in ["Verbatim", "Verbatim*", "Code", "Author_Email", "Author_URL"]:
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of " + words[1] + " layout at line " + str(i))
                i += 1
                continue
            while True:
                k = find_token(document.body, '\\begin_inset Quotes', i, j)
                if k == -1:
                    i += 1
                    break
                l = find_end_of_inset(document.body, k)
                if l == -1:
                    document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
                    i = k
                    continue
                replace = "\""
                if document.body[k].endswith("s"):
                    replace = "'"
                # NOTE(review): same stale-j caveat as in the inset pass above.
                document.body[k:l+1] = [replace]
        else:
            i += 1
            continue

    # Now handle Hebrew
    # Skip entirely if neither the document language nor any text span is Hebrew.
    if not document.language == "hebrew" and find_token(document.body, '\\lang hebrew', 0) == -1:
        return

    i = 0
    j = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes', i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        # The quote is Hebrew if the innermost \lang before it (within the
        # containing layout) is Hebrew, or if there is none and the document
        # language is Hebrew.
        hebrew = False
        # NOTE(review): assumes the quote sits inside a layout;
        # get_containing_layout returning a non-tuple would raise on
        # parent[1] below — confirm this cannot happen here.
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            hebrew = document.language == "hebrew"
        elif document.body[ql] == "\\lang hebrew":
            hebrew = True
        if hebrew:
            replace = "\""
            if document.body[k].endswith("s"):
                replace = "'"
            document.body[k:l+1] = [replace]
        i = l
+
+
def revert_iopart(document):
    " Input new styles via local layout "
    if document.textclass != "iopart":
        return

    # Locate the local layout block, creating an empty one before
    # \language if the header has none yet.
    start = find_token(document.header, "\\begin_local_layout", 0)
    if start == -1:
        anchor = find_token(document.header, "\\language", 0)
        if anchor == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        document.header[anchor-1 : anchor-1] = ["\\begin_local_layout", "\\end_local_layout"]
        start = anchor - 1

    end = find_end_of(document.header, start, "\\begin_local_layout", "\\end_local_layout")
    if end == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    # Pull in the standard layouts that provide the new styles.
    document.header[start+1 : start+1] = [
        "### Inserted by lyx2lyx (stdlayouts) ###",
        "Input stdlayouts.inc",
        "### End of insertion by lyx2lyx (stdlayouts) ###"
    ]
+
+
def convert_iopart(document):
    " Remove local layout we added, if it is there "
    if document.textclass != "iopart":
        return

    start = find_token(document.header, "\\begin_local_layout", 0)
    if start == -1:
        return

    end = find_end_of(document.header, start, "\\begin_local_layout", "\\end_local_layout")
    if end == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    ins = find_token(document.header, "### Inserted by lyx2lyx (stdlayouts) ###", start, end)
    if ins == -1:
        # nothing of ours in the local layout
        return
    stop = find_token(document.header, "### End of insertion by lyx2lyx (stdlayouts) ###", start, end)
    if stop == -1:
        # this should not happen
        document.warning("End of lyx2lyx local layout insertion not found!")
        return
    if ins == start + 1 and stop == end - 1:
        # that was all the local layout there was
        document.header[start : end + 1] = []
    else:
        document.header[ins : stop + 1] = []
+
+
def convert_quotestyle(document):
    " Convert \\quotes_language to \\quotes_style "
    pos = find_token(document.header, "\\quotes_language", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Can't find \\quotes_language!")
        return
    # rename the header tag, keeping its value
    style = get_value(document.header, "\\quotes_language", pos)
    document.header[pos] = "\\quotes_style " + style
+
+
def revert_quotestyle(document):
    " Revert \\quotes_style to \\quotes_language "
    pos = find_token(document.header, "\\quotes_style", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Can't find \\quotes_style!")
        return
    # rename the header tag back, keeping its value
    style = get_value(document.header, "\\quotes_style", pos)
    document.header[pos] = "\\quotes_language " + style
+
+
def revert_plainquote(document):
    " Revert plain quote insets "

    # First, revert style setting
    hpos = find_token(document.header, "\\quotes_style plain", 0)
    if hpos != -1:
        document.header[hpos] = "\\quotes_style english"

    # now the insets
    pos = 0
    while True:
        start = find_token(document.body, '\\begin_inset Quotes q', pos)
        if start == -1:
            return
        end = find_end_of_inset(document.body, start)
        if end == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(start))
            pos = start
            continue
        # single quote when the inset code ends in "s", double otherwise
        plain = "'" if document.body[start].endswith("s") else "\""
        document.body[start:end+1] = [plain]
        pos = end
+
+
def convert_frenchquotes(document):
    " Convert french quote insets to swiss "

    # First, revert style setting
    hpos = find_token(document.header, "\\quotes_style french", 0)
    if hpos != -1:
        document.header[hpos] = "\\quotes_style swiss"

    # now the insets: rewrite the style letter of each quote code
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes f', pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        newcode = code.replace("f", "c", 1)
        document.body[pos] = document.body[pos].replace(code, newcode)
        pos += 1
+
+
def revert_swissquotes(document):
    " Revert swiss quote insets to french "

    # First, revert style setting
    hpos = find_token(document.header, "\\quotes_style swiss", 0)
    if hpos != -1:
        document.header[hpos] = "\\quotes_style french"

    # now the insets: rewrite the style letter of each quote code
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes c', pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        newcode = code.replace("c", "f", 1)
        document.body[pos] = document.body[pos].replace(code, newcode)
        pos += 1
+
+
def revert_britishquotes(document):
    " Revert british quote insets to english "

    # First, revert style setting
    hpos = find_token(document.header, "\\quotes_style british", 0)
    if hpos != -1:
        document.header[hpos] = "\\quotes_style english"

    # now the insets: change the style letter and swap the level,
    # since British primary quotes correspond to English secondary ones
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes b', pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        newcode = code.replace("b", "e", 1)
        if code[2] == "d":
            # opening mark
            newcode = newcode.replace("d", "s")
        else:
            # closing mark
            newcode = newcode.replace("s", "d")
        document.body[pos] = document.body[pos].replace(code, newcode)
        pos += 1
+
+
def revert_swedishgquotes(document):
    " Revert swedish quote insets "

    # First, revert style setting
    hpos = find_token(document.header, "\\quotes_style swedishg", 0)
    if hpos != -1:
        document.header[hpos] = "\\quotes_style danish"

    # now the insets
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes w', pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        if code[2] == "d":
            # outer marks
            newcode = code.replace("w", "a", 1).replace("r", "l")
        else:
            # inner marks
            newcode = code.replace("w", "s", 1)
        document.body[pos] = document.body[pos].replace(code, newcode)
        pos += 1
+
+
def revert_frenchquotes(document):
    " Revert french inner quote insets "

    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes f', pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        if code[2] == "s":
            # inner marks: map to english double quotes
            newcode = code.replace("f", "e", 1).replace("s", "d")
            document.body[pos] = document.body[pos].replace(code, newcode)
        pos += 1
+
+
def revert_frenchinquotes(document):
    " Revert inner frenchin quote insets "

    # First, revert style setting
    hpos = find_token(document.header, "\\quotes_style frenchin", 0)
    if hpos != -1:
        document.header[hpos] = "\\quotes_style french"

    # now the insets
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes i', pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        newcode = code.replace("i", "f", 1)
        if code[2] == "s":
            # inner marks become double
            newcode = newcode.replace("s", "d")
        document.body[pos] = document.body[pos].replace(code, newcode)
        pos += 1
+
+
def revert_russianquotes(document):
    " Revert russian quote insets "

    # First, revert style setting
    hpos = find_token(document.header, "\\quotes_style russian", 0)
    if hpos != -1:
        document.header[hpos] = "\\quotes_style french"

    # now the insets
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes r', pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        if code[2] == "s":
            # inner marks: german double quotes
            newcode = code.replace("r", "g", 1).replace("s", "d")
        else:
            # outer marks: french guillemets
            newcode = code.replace("r", "f", 1)
        document.body[pos] = document.body[pos].replace(code, newcode)
        pos += 1
+
+
def revert_dynamicquotes(document):
    " Revert dynamic quote insets "

    # First, revert header
    hpos = find_token(document.header, "\\dynamic_quotes", 0)
    if hpos != -1:
        del document.header[hpos]

    # Get global style
    style = "english"
    spos = find_token(document.header, "\\quotes_style", 0)
    if spos == -1:
        document.warning("Malformed document! Missing \\quotes_style")
    else:
        style = get_value(document.header, "\\quotes_style", spos)

    # Map the global style onto the one-letter inset code (english fallback)
    codes = {
        "english": "e",
        "swedish": "s",
        "german": "g",
        "polish": "p",
        "swiss": "c",
        "danish": "a",
        "plain": "q",
        "british": "b",
        "swedishg": "w",
        "french": "f",
        "frenchin": "i",
        "russian": "r",
    }
    s = codes.get(style, "e")

    # now transform the insets: dynamic quotes carry the code "x"
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes x', pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace("x", s)
        pos += 1
+
+
def revert_cjkquotes(document):
    """ Revert cjk quote insets.

    Corner bracket quotes (inset code "j") and angle bracket quotes
    (code "k") are replaced by the corresponding Unicode characters in
    CJK context, and by math-mode delimiters otherwise.  If the global
    quote style is a cjk style, it is reverted to english first and any
    dynamic quotes (code "x") are resolved to the cjk code.
    """

    # Get global style
    style = "english"
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\quotes_style")
    else:
        style = get_value(document.header, "\\quotes_style", i)

    global_cjk = style.find("cjk") != -1

    if global_cjk:
        document.header[i] = "\\quotes_style english"
        # transform dynamic insets to the cjk code
        s = "j"
        if style == "cjkangle":
            s = "k"
        i = 0
        while True:
            i = find_token(document.body, '\\begin_inset Quotes x', i)
            if i == -1:
                break
            document.body[i] = document.body[i].replace("x", s)
            i += 1

    cjk_langs = ["chinese-simplified", "chinese-traditional", "japanese", "japanese-cjk", "korean"]

    # Pass 1: corner bracket quotes (code "j")
    i = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes j', i)
        if k == -1:
            break
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        # Quote is in CJK context if the innermost \lang before it (within
        # the containing layout) is a CJK language, or there is no \lang
        # and the document language is CJK.
        cjk = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            cjk = document.language in cjk_langs
        elif document.body[ql].split()[1] in cjk_langs:
            cjk = True
        # Fix: read the quote code from the inset found at k, not from i;
        # with i, get_value could pick up an earlier, unrelated Quotes inset.
        val = get_value(document.body, "\\begin_inset Quotes", k)[7:]
        replace = []
        if val[2] == "s":
            # inner marks
            if val[1] == "l":
                # inner opening mark
                if cjk:
                    replace = [u"\u300E"]
                else:
                    replace = ["\\begin_inset Formula $\\llceil$", "\\end_inset"]
            else:
                # inner closing mark
                if cjk:
                    replace = [u"\u300F"]
                else:
                    replace = ["\\begin_inset Formula $\\rrfloor$", "\\end_inset"]
        else:
            # outer marks
            if val[1] == "l":
                # outer opening mark
                if cjk:
                    replace = [u"\u300C"]
                else:
                    replace = ["\\begin_inset Formula $\\lceil$", "\\end_inset"]
            else:
                # outer closing mark
                if cjk:
                    replace = [u"\u300D"]
                else:
                    replace = ["\\begin_inset Formula $\\rfloor$", "\\end_inset"]

        document.body[k:l+1] = replace
        i = l

    # Pass 2: angle bracket quotes (code "k")
    i = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes k', i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            i = k
            continue
        cjk = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            cjk = document.language in cjk_langs
        elif document.body[ql].split()[1] in cjk_langs:
            cjk = True
        # Fix: read the quote code at k, not i (see pass 1).
        val = get_value(document.body, "\\begin_inset Quotes", k)[7:]
        replace = []
        if val[2] == "s":
            # inner marks
            if val[1] == "l":
                # inner opening mark
                if cjk:
                    replace = [u"\u3008"]
                else:
                    replace = ["\\begin_inset Formula $\\langle$", "\\end_inset"]
            else:
                # inner closing mark
                if cjk:
                    replace = [u"\u3009"]
                else:
                    replace = ["\\begin_inset Formula $\\rangle$", "\\end_inset"]
        else:
            # outer marks
            if val[1] == "l":
                # outer opening mark
                if cjk:
                    replace = [u"\u300A"]
                else:
                    replace = ["\\begin_inset Formula $\\langle\\kern -2.5pt\\langle$", "\\end_inset"]
            else:
                # outer closing mark
                if cjk:
                    replace = [u"\u300B"]
                else:
                    replace = ["\\begin_inset Formula $\\rangle\\kern -2.5pt\\rangle$", "\\end_inset"]

        document.body[k:l+1] = replace
        i = l
+
+
def revert_crimson(document):
    " Revert native Cochineal/Crimson font definition to LaTeX "

    # Only applies when TeX fonts are in use
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    roman = find_token(document.header, "\\font_roman \"cochineal\"", 0)
    if roman == -1:
        return
    # If old-style figures were requested, turn them into package options
    osfline = find_token(document.header, "\\font_osf true", 0)
    preamble = "\\usepackage"
    if osfline != -1:
        document.header[osfline] = "\\font_osf false"
        preamble += "[proportional,osf]"
    preamble += "{cochineal}"
    add_to_preamble(document, [preamble])
    document.header[roman] = document.header[roman].replace("cochineal", "default")
+
+
def revert_cochinealmath(document):
    " Revert cochineal newtxmath definitions to LaTeX "

    # Only applies when TeX fonts are in use
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        i = find_token(document.header, "\\font_math \"cochineal-ntxm\"", 0)
        if i != -1:
            # Pass the preamble snippet as a list, consistent with the other
            # reversion routines in this file (e.g. revert_crimson).
            add_to_preamble(document, ["\\usepackage[cochineal]{newtxmath}"])
            document.header[i] = document.header[i].replace("cochineal-ntxm", "auto")
+
+
def revert_labelonly(document):
    " Revert labelonly tag for InsetRef "
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset CommandInset ref", pos)
        if pos == -1:
            return
        endpos = find_end_of_inset(document.body, pos)
        if endpos == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(pos))
            pos += 1
            continue
        if find_token(document.body, "LatexCommand labelonly", pos, endpos) == -1:
            # not a labelonly reference; skip the inset
            pos = endpos
            continue
        label = get_quoted_value(document.body, "reference", pos, endpos)
        if not label:
            document.warning("Can't find label for reference at line %d!" %(pos))
            pos = endpos + 1
            continue
        # replace the whole inset by the raw label in ERT
        document.body[pos:endpos+1] = put_cmd_in_ert([label])
        pos += 1
+
+
def revert_plural_refs(document):
    """ Revert plural and capitalized references.

    Formatted (refstyle) references with the "plural" or "caps" flag are
    replaced by the equivalent refstyle ERT (\\Prefref[s]{label}); for all
    other reference insets the two flags are simply deleted.
    """

    # refstyle formatting is only active with "\use_refstyle 1" in the header
    i = find_token(document.header, "\\use_refstyle 1", 0)
    # Fix: find_token returns -1 when the line is absent; the former
    # test (i != 0) considered refstyle enabled even when it was not.
    use_refstyle = (i != -1)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue

        plural = caps = suffix = False
        # Fix: the tag is spelled "LatexCommand" in LyX files (cf. the other
        # routines here); the former "LaTeXCommand" never matched, so the
        # plural/caps reversion below was unreachable.
        k = find_token(document.body, "LatexCommand formatted", i, j)
        if k != -1 and use_refstyle:
            plural = get_bool_value(document.body, "plural", i, j, False)
            caps = get_bool_value(document.body, "caps", i, j, False)
            label = get_quoted_value(document.body, "reference", i, j)
            if label:
                try:
                    # refstyle labels look like "prefix:name"
                    (prefix, suffix) = label.split(":", 1)
                except:
                    document.warning("No `:' separator in formatted reference at line %d!" % (i))
            else:
                document.warning("Can't find label for reference at line %d!" % (i))

        # this effectively tests also for use_refstyle and a formatted reference
        # we do this complicated test because we would otherwise do this erasure
        # over and over and over
        if not ((plural or caps) and suffix):
            del_token(document.body, "plural", i, j)
            del_token(document.body, "caps", i, j - 1) # since we deleted a line
            i = j - 1
            continue

        if caps:
            prefix = prefix[0].title() + prefix[1:]
        cmd = "\\" + prefix + "ref"
        if plural:
            cmd += "[s]"
        cmd += "{" + suffix + "}"
        document.body[i:j+1] = put_cmd_in_ert([cmd])
        i += 1
+
+
def revert_noprefix(document):
    " Revert labelonly tags with 'noprefix' set "
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset CommandInset ref", pos)
        if pos == -1:
            return
        endpos = find_end_of_inset(document.body, pos)
        if endpos == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(pos))
            pos += 1
            continue
        cmdpos = find_token(document.body, "LatexCommand labelonly", pos, endpos)
        noprefix = cmdpos != -1 and get_bool_value(document.body, "noprefix", pos, endpos)
        if not noprefix:
            # either it was not a labelonly command, or else noprefix was not set.
            # in that case, we just delete the option.
            del_token(document.body, "noprefix", pos, endpos)
            pos = endpos
            continue
        label = get_quoted_value(document.body, "reference", pos, endpos)
        if not label:
            document.warning("Can't find label for reference at line %d!" %(pos))
            pos = endpos + 1
            continue
        try:
            (prefix, suffix) = label.split(":", 1)
        except:
            document.warning("No `:' separator in formatted reference at line %d!" % (pos))
            # we'll leave this as an ordinary labelonly reference
            del_token(document.body, "noprefix", pos, endpos)
            pos = endpos
            continue
        # drop the prefix: output only the bare name in ERT
        document.body[pos:endpos+1] = put_cmd_in_ert([suffix])
        pos += 1
+
+
def revert_biblatex(document):
    " Revert biblatex support "
    # Reverts the 2.3 biblatex cite engines to natbib: the engine and the
    # biblatex-specific header lines are removed (their values are carried
    # over into a \usepackage{biblatex} preamble line), bibtex insets are
    # wrapped in notes and replaced by \printbibliography ERT, and
    # biblatex-only citation commands are turned into ERT.

    #
    # Header
    #

    # 1. Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # 2. Store biblatex state and revert to natbib
    biblatex = False
    if engine in ["biblatex", "biblatex-natbib"]:
        biblatex = True
        document.header[i] = "\\cite_engine natbib"

    # 3. Store and remove new document headers
    bibstyle = ""
    i = find_token(document.header, "\\biblatex_bibstyle", 0)
    if i != -1:
        bibstyle = get_value(document.header, "\\biblatex_bibstyle", i)
        del document.header[i]

    citestyle = ""
    i = find_token(document.header, "\\biblatex_citestyle", 0)
    if i != -1:
        citestyle = get_value(document.header, "\\biblatex_citestyle", i)
        del document.header[i]

    biblio_options = ""
    i = find_token(document.header, "\\biblio_options", 0)
    if i != -1:
        biblio_options = get_value(document.header, "\\biblio_options", i)
        del document.header[i]

    # Reconstruct the package options from the removed header values.
    if biblatex:
        bbxopts = "[natbib=true"
        if bibstyle != "":
            bbxopts += ",bibstyle=" + bibstyle
        if citestyle != "":
            bbxopts += ",citestyle=" + citestyle
        if biblio_options != "":
            bbxopts += "," + biblio_options
        bbxopts += "]"
        add_to_preamble(document, "\\usepackage" + bbxopts + "{biblatex}")

    #
    # Body
    #

    # 1. Bibtex insets
    i = 0
    bibresources = []
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of bibtex inset at line %d!!" %(i))
            i += 1
            continue
        bibs = get_quoted_value(document.body, "bibfiles", i, j)
        opts = get_quoted_value(document.body, "biblatexopts", i, j)
        # store resources
        if bibs:
            bibresources += bibs.split(",")
        else:
            document.warning("Can't find bibfiles for bibtex inset at line %d!" %(i))
        # remove biblatexopts line
        k = find_token(document.body, "biblatexopts", i, j)
        if k != -1:
            del document.body[k]
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
        # Insert ERT \\printbibliography and wrap bibtex inset to a Note
        if biblatex:
            pcmd = "printbibliography"
            if opts:
                pcmd += "[" + opts + "]"
            repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
                    "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
                    "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
                    "status open", "", "\\begin_layout Plain Layout" ]
            repl += document.body[i:j+1]
            repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
            document.body[i:j+1] = repl
            # NOTE(review): the wrapper adds 26 lines; j += 27 therefore
            # skips one extra line after the inset — confirm intended.
            j += 27

        i = j + 1

    if biblatex:
        for b in bibresources:
            # NOTE(review): assumes the stored bibfiles have no .bib
            # extension — verify against the inset writer.
            add_to_preamble(document, "\\addbibresource{" + b + ".bib}")

    # 2. Citation insets

    # Specific citation insets used in biblatex that need to be reverted to ERT
    # (maps the LyX inset command to the biblatex LaTeX command)
    new_citations = {
        "Cite" : "Cite",
        "citebyear" : "citeyear",
        "citeyear" : "cite*",
        "Footcite" : "Smartcite",
        "footcite" : "smartcite",
        "Autocite" : "Autocite",
        "autocite" : "autocite",
        "citetitle" : "citetitle",
        "citetitle*" : "citetitle*",
        "fullcite" : "fullcite",
        "footfullcite" : "footfullcite",
        "supercite" : "supercite",
        "citeauthor" : "citeauthor",
        "citeauthor*" : "citeauthor*",
        "Citeauthor" : "Citeauthor",
        "Citeauthor*" : "Citeauthor*"
        }

    # All commands accepted by LyX < 2.3. Everything else throws an error.
    old_citations = [ "cite", "nocite", "citet", "citep", "citealt", "citealp",\
                      "citeauthor", "citeyear", "citeyearpar", "citet*", "citep*",\
                      "citealt*", "citealp*", "citeauthor*", "Citet", "Citep",\
                      "Citealt", "Citealp", "Citeauthor", "Citet*", "Citep*",\
                      "Citealt*", "Citealp*", "Citeauthor*", "fullcite", "footcite",\
                      "footcitet", "footcitep", "footcitealt", "footcitealp",\
                      "footciteauthor", "footciteyear", "footciteyearpar",\
                      "citefield", "citetitle", "cite*" ]

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" %(i))
            i += 1
            continue
        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
            i = j + 1
            continue
        cmd = get_value(document.body, "LatexCommand", k)
        if biblatex and cmd in list(new_citations.keys()):
            pre = get_quoted_value(document.body, "before", i, j)
            post = get_quoted_value(document.body, "after", i, j)
            key = get_quoted_value(document.body, "key", i, j)
            if not key:
                document.warning("Citation inset at line %d does not have a key!" %(i))
                key = "???"
            # Replace known new commands with ERT
            res = "\\" + new_citations[cmd]
            if pre:
                res += "[" + pre + "]"
            if post:
                res += "[" + post + "]"
            elif pre:
                # an empty second optional argument is needed so that the
                # single bracket is read as the prenote, not the postnote
                res += "[]"
            res += "{" + key + "}"
            document.body[i:j+1] = put_cmd_in_ert([res])
        elif cmd not in old_citations:
            # Reset unknown commands to cite. This is what LyX does as well
            # (but LyX 2.2 would break on unknown commands)
            document.body[k] = "LatexCommand cite"
            document.warning("Reset unknown cite command '%s' with cite" % cmd)
        i = j + 1

    # Emulate the old biblatex-workaround (pretend natbib in order to use the styles)
    if biblatex:
        i = find_token(document.header, "\\begin_local_layout", 0)
        if i == -1:
            k = find_token(document.header, "\\language", 0)
            if k == -1:
                # this should not happen
                document.warning("Malformed LyX document! No \\language header found!")
                return
            document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
            i = k-1

        j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
        if j == -1:
            # this should not happen
            document.warning("Malformed LyX document! Can't find end of local layout!")
            return

        document.header[i+1 : i+1] = [
            "### Inserted by lyx2lyx (biblatex emulation) ###",
            "Provides natbib 1",
            "### End of insertion by lyx2lyx (biblatex emulation) ###"
        ]
+
+
def revert_citekeyonly(document):
    " Revert keyonly cite command to ERT "

    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset CommandInset citation", pos)
        if pos == -1:
            break
        endpos = find_end_of_inset(document.body, pos)
        if endpos == -1:
            document.warning("Can't find end of citation inset at line %d!!" %(pos))
            pos += 1
            continue
        cmdpos = find_token(document.body, "LatexCommand", pos, endpos)
        if cmdpos == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(pos))
            pos = endpos + 1
            continue
        if get_value(document.body, "LatexCommand", cmdpos) != "keyonly":
            # some other citation command: leave it alone
            pos = endpos + 1
            continue

        key = get_quoted_value(document.body, "key", pos, endpos)
        if not key:
            document.warning("Citation inset at line %d does not have a key!" %(pos))
        # Replace known new commands with ERT
        document.body[pos:endpos+1] = put_cmd_in_ert([key])
        pos = endpos + 1
+
+
+
def revert_bibpackopts(document):
    """ Revert support for natbib/jurabib package options.

    The \\biblio_options header line is removed; for the natbib and
    jurabib engines its value is re-applied through a PackageOptions
    statement in the local layout.
    """

    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # (removed an unused local "biblatex = False" left over from
    # revert_biblatex)
    if engine not in ["natbib", "jurabib"]:
        return

    i = find_token(document.header, "\\biblio_options", 0)
    if i == -1:
        # Nothing to do if we have no options
        return

    biblio_options = get_value(document.header, "\\biblio_options", i)
    del document.header[i]

    if not biblio_options:
        # Nothing to do for empty options
        return

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        k = find_token(document.header, "\\language", 0)
        if k == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        # create an empty local layout block right before \language
        document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
        i = k - 1

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    document.header[i+1 : i+1] = [
        "### Inserted by lyx2lyx (bibliography package options) ###",
        "PackageOptions " + engine + " " + biblio_options,
        "### End of insertion by lyx2lyx (bibliography package options) ###"
    ]
+
+
def revert_qualicites(document):
    """ Revert qualified citation list commands to ERT.

    With biblatex engines, known multicite commands are rewritten as raw
    LaTeX (e.g. \\cites(pre)(post)[pre][post]{key}...). Otherwise the
    qualified-list parameters are simply dropped from the inset.
    """

    # Citation insets that support qualified lists, with their LaTeX code
    ql_citations = {
        "cite" : "cites",
        "Cite" : "Cites",
        "citet" : "textcites",
        "Citet" : "Textcites",
        "citep" : "parencites",
        "Citep" : "Parencites",
        "Footcite" : "Smartcites",
        "footcite" : "smartcites",
        "Autocite" : "Autocites",
        "autocite" : "autocites",
        }

    # Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    biblatex = engine in ["biblatex", "biblatex-natbib"]

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" %(i))
            i += 1
            continue
        pres = find_token(document.body, "pretextlist", i, j)
        posts = find_token(document.body, "posttextlist", i, j)
        if pres == -1 and posts == -1:
            # nothing to do.
            i = j + 1
            continue
        # Only read values whose parameter line actually exists: a start
        # index of -1 would make the lookup search from the wrong place.
        pretexts = get_quoted_value(document.body, "pretextlist", pres) if pres != -1 else ""
        posttexts = get_quoted_value(document.body, "posttextlist", posts) if posts != -1 else ""
        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
            i = j + 1
            continue
        cmd = get_value(document.body, "LatexCommand", k)
        if biblatex and cmd in list(ql_citations.keys()):
            pre = get_quoted_value(document.body, "before", i, j)
            post = get_quoted_value(document.body, "after", i, j)
            key = get_quoted_value(document.body, "key", i, j)
            if not key:
                document.warning("Citation inset at line %d does not have a key!" %(i))
                key = "???"
            keys = key.split(",")
            # The lists are tab-separated "<key> <text>" entries.
            premap = dict()
            for pp in pretexts.split("\t"):
                ppp = pp.split(" ", 1)
                if len(ppp) > 1:
                    # guard against entries without a text part
                    premap[ppp[0]] = ppp[1]
            postmap = dict()
            for pp in posttexts.split("\t"):
                ppp = pp.split(" ", 1)
                if len(ppp) > 1:
                    postmap[ppp[0]] = ppp[1]
            # Replace known new commands with ERT.
            # Parentheses in the global notes must be protected by braces.
            if "(" in pre or ")" in pre:
                pre = "{" + pre + "}"
            if "(" in post or ")" in post:
                post = "{" + post + "}"
            res = "\\" + ql_citations[cmd]
            if pre:
                res += "(" + pre + ")"
            if post:
                res += "(" + post + ")"
            elif pre:
                # a single paren group would be misread, so add an empty one
                res += "()"
            for kk in keys:
                if premap.get(kk, "") != "":
                    res += "[" + premap[kk] + "]"
                if postmap.get(kk, "") != "":
                    res += "[" + postmap[kk] + "]"
                elif premap.get(kk, "") != "":
                    # single bracket means postnote, so keep an empty one
                    res += "[]"
                res += "{" + kk + "}"
            document.body[i:j+1] = put_cmd_in_ert([res])
        else:
            # Just remove the qualified-list parameter lines. Delete the
            # larger index first so the smaller one stays valid.
            # (The original deleted by the *string values* pretexts/posttexts,
            # which raised TypeError: list indices must be integers.)
            for line_no in sorted((pres, posts), reverse=True):
                if line_no != -1:
                    del document.body[line_no]
        i += 1
+
+
# Insets whose LaTeX command arguments got a "literal" parameter in format 532
command_insets = ["bibitem", "citation", "href", "index_print", "nomenclature"]
def convert_literalparam(document):
    " Add param literal "

    for inset in command_insets:
        i = 0
        while True:
            i = find_token(document.body, '\\begin_inset CommandInset %s' % inset, i)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of %s inset at line %d" % (inset, i))
                i += 1
                continue
            # Skip to the first empty line inside the inset: the parameter
            # block ends there, and the new parameter is appended to it.
            while i < j and document.body[i].strip() != '':
                i += 1
            # href is already fully latexified. Here we can switch off literal.
            if inset == "href":
                document.body.insert(i, "literal \"false\"")
            else:
                document.body.insert(i, "literal \"true\"")
+
+
+
def revert_literalparam(document):
    " Remove param literal "

    for inset in command_insets:
        lineno = 0
        while True:
            lineno = find_token(document.body, '\\begin_inset CommandInset %s' % inset, lineno)
            if lineno == -1:
                break
            insetend = find_end_of_inset(document.body, lineno)
            if insetend == -1:
                document.warning("Malformed LyX document: Can't find end of %s inset at line %d" % (inset, lineno))
                lineno += 1
                continue
            # drop the "literal" parameter line, if the inset has one
            litline = find_token(document.body, 'literal', lineno, insetend)
            if litline == -1:
                lineno += 1
                continue
            del document.body[litline]
+
+
+
def revert_multibib(document):
    """ Revert multibib support.

    For biblatex, the \\multibib unit becomes a "refsection" package option
    plus \\bibbysection ERT. For BibTeX, bibtopic is enabled and each unit
    heading is wrapped in \\begin{btUnit}/\\end{btUnit} ERT.
    """

    # 1. Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # 2. Do we use biblatex?
    biblatex = False
    if engine in ["biblatex", "biblatex-natbib"]:
        biblatex = True

    # 3. Store and remove multibib document header
    multibib = ""
    i = find_token(document.header, "\\multibib", 0)
    if i != -1:
        multibib = get_value(document.header, "\\multibib", i)
        del document.header[i]

    if not multibib:
        return

    # 4. The easy part: Biblatex
    if biblatex:
        i = find_token(document.header, "\\biblio_options", 0)
        if i == -1:
            k = find_token(document.header, "\\use_bibtopic", 0)
            if k == -1:
                # this should not happen
                document.warning("Malformed LyX document! No \\use_bibtopic header found!")
                return
            document.header[k-1 : k-1] = ["\\biblio_options " + "refsection=" + multibib]
        else:
            biblio_options = get_value(document.header, "\\biblio_options", i)
            if biblio_options:
                biblio_options += ","
            biblio_options += "refsection=" + multibib
            document.header[i] = "\\biblio_options " + biblio_options

        # Bibtex insets
        i = 0
        while (True):
            i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Can't find end of bibtex inset at line %d!!" %(i))
                i += 1
                continue
            btprint = get_quoted_value(document.body, "btprint", i, j)
            if btprint != "bibbysection":
                i += 1
                continue
            opts = get_quoted_value(document.body, "biblatexopts", i, j)
            # change btprint line
            k = find_token(document.body, "btprint", i, j)
            if k != -1:
                document.body[k] = "btprint \"btPrintCited\""
            # Insert ERT \\bibbysection and wrap bibtex inset to a Note
            pcmd = "bibbysection"
            if opts:
                pcmd += "[" + opts + "]"
            repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",
                    "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",
                    "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",
                    "status open", "", "\\begin_layout Plain Layout" ]
            repl += document.body[i:j+1]
            repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
            document.body[i:j+1] = repl
            # NOTE(review): 26 lines are added around the inset; the offset 27
            # used here looks one too large — verify against a sample document.
            j += 27

            i = j + 1
        return

    # 5. More tricky: Bibtex/Bibtopic
    k = find_token(document.header, "\\use_bibtopic", 0)
    if k == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\use_bibtopic header found!")
        return
    document.header[k] = "\\use_bibtopic true"

    # Possible units. This assumes that the LyX name follows the std,
    # which might not always be the case. But it's as good as we can get.
    units = {
        "part" : "Part",
        "chapter" : "Chapter",
        "section" : "Section",
        "subsection" : "Subsection",
        }

    if multibib not in units.keys():
        # fixed: the original referenced the misspelled name `nultibib`,
        # which raised NameError on this path
        document.warning("Unknown multibib value `%s'!" % multibib)
        return
    unit = units[multibib]
    btunit = False
    i = 0
    while (True):
        i = find_token(document.body, "\\begin_layout " + unit, i)
        if i == -1:
            break
        # close the previous btUnit (if any) and open a new one.
        # (Each list element below is one body line; the original lacked a
        # comma after "begin{btUnit}", fusing it with "\\end_layout".)
        if btunit:
            document.body[i-1 : i-1] = ["\\begin_layout Standard",
                "\\begin_inset ERT", "status open", "",
                "\\begin_layout Plain Layout", "", "",
                "\\backslash",
                "end{btUnit}", "\\end_layout",
                "\\begin_layout Plain Layout", "",
                "\\backslash",
                "begin{btUnit}",
                "\\end_layout", "", "\\end_inset", "", "",
                "\\end_layout", ""]
            i += 21
        else:
            document.body[i-1 : i-1] = ["\\begin_layout Standard",
                "\\begin_inset ERT", "status open", "",
                "\\begin_layout Plain Layout", "", "",
                "\\backslash",
                "begin{btUnit}",
                "\\end_layout", "", "\\end_inset", "", "",
                "\\end_layout", ""]
            i += 16
        btunit = True
        i += 1

    if btunit:
        # close the last btUnit before the end of the body
        i = find_token(document.body, "\\end_body", i)
        document.body[i-1 : i-1] = ["\\begin_layout Standard",
            "\\begin_inset ERT", "status open", "",
            "\\begin_layout Plain Layout", "", "",
            "\\backslash",
            "end{btUnit}",
            "\\end_layout", "", "\\end_inset", "", "",
            "\\end_layout", ""]
+
+
def revert_chapterbib(document):
    """ Revert chapterbib support.

    \\multibib child: with biblatex, each included child file is preceded by
    \\newrefsection ERT; with bibtopic, each child is wrapped in a btUnit;
    otherwise the chapterbib package is loaded.
    """

    # 1. Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # 2. Do we use biblatex?
    biblatex = False
    if engine in ["biblatex", "biblatex-natbib"]:
        biblatex = True

    # 3. Store multibib document header value
    multibib = ""
    i = find_token(document.header, "\\multibib", 0)
    if i != -1:
        multibib = get_value(document.header, "\\multibib", i)

    if not multibib or multibib != "child":
        # nothing to do
        return

    # 4. remove multibib header
    del document.header[i]

    # 5. Biblatex
    if biblatex:
        # find include insets
        i = 0
        while (True):
            i = find_token(document.body, "\\begin_inset CommandInset include", i)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Can't find end of bibtex inset at line %d!!" %(i))
                i += 1
                continue
            parent = get_containing_layout(document.body, i)
            parbeg = parent[1]

            # Insert ERT \\newrefsection before inset.
            # (Each list element is one body line; the original lacked the
            # comma after "newrefsection", fusing it with "\\end_layout".)
            beg = ["\\begin_layout Standard",
                   "\\begin_inset ERT", "status open", "",
                   "\\begin_layout Plain Layout", "", "",
                   "\\backslash",
                   "newrefsection",
                   "\\end_layout", "", "\\end_inset", "", "",
                   "\\end_layout", ""]
            document.body[parbeg-1:parbeg-1] = beg
            j += len(beg)
            i = j + 1
        return

    # 6. Bibtex/Bibtopic
    i = find_token(document.header, "\\use_bibtopic", 0)
    if i == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\use_bibtopic header found!")
        return
    if get_value(document.header, "\\use_bibtopic", i) == "true":
        # find include insets
        i = 0
        while (True):
            i = find_token(document.body, "\\begin_inset CommandInset include", i)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Can't find end of bibtex inset at line %d!!" %(i))
                i += 1
                continue
            parent = get_containing_layout(document.body, i)
            parbeg = parent[1]
            parend = parent[2]

            # Insert wrap inset into \\begin{btUnit}...\\end{btUnit}
            # (commas after "begin{btUnit}"/"end{btUnit}" fixed as above)
            beg = ["\\begin_layout Standard",
                   "\\begin_inset ERT", "status open", "",
                   "\\begin_layout Plain Layout", "", "",
                   "\\backslash",
                   "begin{btUnit}",
                   "\\end_layout", "", "\\end_inset", "", "",
                   "\\end_layout", ""]
            end = ["\\begin_layout Standard",
                   "\\begin_inset ERT", "status open", "",
                   "\\begin_layout Plain Layout", "", "",
                   "\\backslash",
                   "end{btUnit}",
                   "\\end_layout", "", "\\end_inset", "", "",
                   "\\end_layout", ""]
            document.body[parend+1:parend+1] = end
            document.body[parbeg-1:parbeg-1] = beg
            j += len(beg) + len(end)
            i = j + 1
        return

    # 7. Chapterbib proper
    add_to_preamble(document, ["\\usepackage{chapterbib}"])
+
+
def convert_dashligatures(document):
    "Set 'use_dash_ligatures' according to content."
    # None means "undecided": the header setting is only written at the end
    # if the document content gives a clear preference.
    use_dash_ligatures = None
    # eventually remove preamble code from 2.3->2.2 conversion:
    for i, line in enumerate(document.preamble):
        if i > 1 and line == r'\renewcommand{\textemdash}{---}':
            if (document.preamble[i-1] == r'\renewcommand{\textendash}{--}'
                and document.preamble[i-2] == '% Added by lyx2lyx'):
                # NOTE(review): this deletes from document.preamble while
                # enumerate() is iterating it — fine for a single marker
                # block, but verify the marker cannot occur twice.
                del document.preamble[i-2:i+1]
                use_dash_ligatures = True
    if use_dash_ligatures is None:
        # Look for dashes:
        # (Documents by LyX 2.1 or older have "\twohyphens\n" or "\threehyphens\n"
        # as interim representation for dash ligatures in 2.2.)
        has_literal_dashes = False
        has_ligature_dashes = False
        # j is the end line of an inset/layout to be skipped (checked above
        # as `i < j` at the top of the loop)
        j = 0
        for i, line in enumerate(document.body):
            # Skip some document parts where dashes are not converted
            if (i < j) or line.startswith("\\labelwidthstring"):
                continue
            words = line.split()
            if (len(words) > 1 and words[0] == "\\begin_inset"
                and (words[1] in ["CommandInset", "ERT", "External", "Formula",
                                  "FormulaMacro", "Graphics", "IPA", "listings"]
                     or ' '.join(words[1:]) == "Flex Code")):
                j = find_end_of_inset(document.body, i)
                if j == -1:
                    document.warning("Malformed LyX document: "
                        "Can't find end of %s inset at line %d" % (words[1],i))
                continue
            if line == "\\begin_layout LyX-Code":
                j = find_end_of_layout(document.body, i)
                if j == -1:
                    document.warning("Malformed LyX document: "
                       "Can't find end of %s layout at line %d" % (words[1],i))
                continue
            # literal dash followed by a word or no-break space:
            if re.search(u"[\u2013\u2014]([\w\u00A0]|$)", line,
                         flags=re.UNICODE):
                has_literal_dashes = True
            # ligature dash followed by word or no-break space on next line:
            if re.search(u"(\\\\twohyphens|\\\\threehyphens)", line,
                         flags=re.UNICODE) and re.match(u"[\w\u00A0]",
                    document.body[i+1], flags=re.UNICODE):
                has_ligature_dashes = True
        if has_literal_dashes and has_ligature_dashes:
            # TODO: insert a warning note in the document?
            document.warning('This document contained both literal and '
                             '"ligature" dashes.\n Line breaks may have changed. '
                             'See UserGuide chapter 3.9.1 for details.')
        elif has_literal_dashes:
            use_dash_ligatures = False
        elif has_ligature_dashes:
            use_dash_ligatures = True
    # insert the setting if there is a preferred value
    if use_dash_ligatures is not None:
        i = find_token(document.header, "\\use_microtype", 0)
        if i != -1:
            document.header.insert(i+1, "\\use_dash_ligatures %s"
                                   % str(use_dash_ligatures).lower())
+
def revert_dashligatures(document):
    r"""Remove font ligature settings for en- and em-dashes.
    Revert conversion of \twohyphens or \threehyphens to literal dashes.

    (Docstring fixed: it is now a raw string — previously '\t' became a tab —
    and names the commands the code actually emits.)
    """
    i = find_token(document.header, "\\use_dash_ligatures", 0)
    if i == -1:
        return
    use_dash_ligatures = get_bool_value(document.header, "\\use_dash_ligatures", i)
    del document.header[i]
    if not use_dash_ligatures or document.backend != "latex":
        return

    # j marks the end of an inset/layout whose content must not be touched
    j = 0
    new_body = []
    for i, line in enumerate(document.body):
        # Skip some document parts where dashes are not converted
        if (i < j) or line.startswith("\\labelwidthstring"):
            new_body.append(line)
            continue
        words = line.split()
        if (len(words) > 1 and words[0] == "\\begin_inset"
            and (words[1] in ["CommandInset", "ERT", "External", "Formula",
                              "FormulaMacro", "Graphics", "IPA", "listings"]
                 or ' '.join(words[1:]) == "Flex Code")):
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of "
                                 + words[1] + " inset at line " + str(i))
            new_body.append(line)
            continue
        if line == "\\begin_layout LyX-Code":
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: "
                    "Can't find end of %s layout at line %d" % (words[1],i))
            new_body.append(line)
            continue
        # TODO: skip replacement in typewriter fonts
        line = line.replace(u'\u2013', '\\twohyphens\n')
        line = line.replace(u'\u2014', '\\threehyphens\n')
        # (dead `lines = line.split('\n')` removed; the split below is used)
        new_body.extend(line.split('\n'))
    document.body = new_body
    # redefine the dash LICRs to use ligature dashes:
    add_to_preamble(document, [r'\renewcommand{\textendash}{--}',
                               r'\renewcommand{\textemdash}{---}'])
+
+
def revert_noto(document):
    " Revert Noto font definitions to LaTeX "

    # Only relevant for TeX fonts
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    # (header token, LaTeX family macro, font name) for the three families
    families = [("\\font_roman \"NotoSerif-TLF\"", "\\rmdefault", "NotoSerif-TLF"),
                ("\\font_sans \"NotoSans-TLF\"", "\\sfdefault", "NotoSans-TLF"),
                ("\\font_typewriter \"NotoMono-TLF\"", "\\ttdefault", "NotoMono-TLF")]
    for token, family, fontname in families:
        i = find_token(document.header, token, 0)
        if i != -1:
            add_to_preamble(document, ["\\renewcommand{" + family + "}{" + fontname + "}"])
            document.header[i] = document.header[i].replace(fontname, "default")
+
+
def revert_xout(document):
    " Reverts \\xout font attribute "
    # Only load ulem if a \xout attribute was actually found and reverted
    if revert_font_attrs(document.body, "\\xout", "\\xout"):
        insert_to_preamble(document,
            ['% for proper cross-out',
             '\\PassOptionsToPackage{normalem}{ulem}',
             '\\usepackage{ulem}'])
+
+
def convert_mathindent(document):
    " add the \\is_math_indent tag "
    # check if the document uses the class option "fleqn"
    # k: insertion point for the new tag (before \quotes_style)
    k = find_token(document.header, "\\quotes_style", 0)
    # NOTE(review): unlike convert_mathnumberpos, this does not check that
    # the match is in the \options line, so "fleqn" anywhere in the header
    # would be misdetected — verify this cannot occur in practice.
    regexp = re.compile(r'^.*fleqn.*')
    i = find_re(document.header, regexp, 0)
    if i != -1:
        document.header.insert(k, "\\is_math_indent 1")
        # delete the found option
        # (assumes \options precedes \quotes_style, so the insert at k
        # did not shift index i)
        document.header[i] = document.header[i].replace(",fleqn", "")
        document.header[i] = document.header[i].replace(", fleqn", "")
        document.header[i] = document.header[i].replace("fleqn,", "")
        j = find_re(document.header, regexp, 0)
        if i == j:
            # then we have fleqn as the only option
            del document.header[i]
    else:
        document.header.insert(k, "\\is_math_indent 0")
+
+
def revert_mathindent(document):
    " Define mathindent if set in the document "
    # first output the length
    regexp = re.compile(r'(\\math_indentation)')
    i = find_re(document.header, regexp, 0)
    if i != -1:
        value = get_value(document.header, "\\math_indentation" , i).split()[0]
        if value != "default":
            # emit the explicit indentation amount as preamble code
            add_to_preamble(document, ["\\setlength{\\mathindent}{" + value + '}'])
        del document.header[i]
    # now set the document class option
    regexp = re.compile(r'(\\is_math_indent 1)')
    i = find_re(document.header, regexp, 0)
    if i == -1:
        # indentation disabled: just drop the tag
        regexp = re.compile(r'(\\is_math_indent)')
        j = find_re(document.header, regexp, 0)
        # NOTE(review): if the tag is missing entirely, j is -1 and this
        # deletes the *last* header line — verify the tag is always present
        # in format 538+ documents.
        del document.header[j]
    else:
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            # prepend fleqn to the existing class options
            document.header[k] = document.header[k].replace("\\options", "\\options fleqn,")
            del document.header[i]
        else:
            # no \options line yet: create one before \use_default_options
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l, "\\options fleqn")
            # the insert above shifted the tag down by one line
            del document.header[i + 1]
+
+
def revert_baselineskip(document):
    """ Revert baselineskips to TeX code.

    VSpace insets and \\hspace space insets whose length is given as a
    percentage of \\baselineskip are replaced by equivalent ERT
    (\\vspace{...\\baselineskip} / \\hspace{...\\baselineskip}).
    """
    i = 0
    vspaceLine = 0
    hspaceLine = 0
    while True:
        # any inset line storing a length in percent of baselineskip
        regexp = re.compile(r'^.*baselineskip%.*$')
        i = find_re(document.body, regexp, i)
        if i == -1:
            return
        vspaceLine = find_token(document.body, "\\begin_inset VSpace", i)
        if vspaceLine == i:
            # output VSpace inset as TeX code
            # first read out the values ("VSpace " is 7 characters)
            beg = document.body[i].rfind("VSpace ")
            end = document.body[i].rfind("baselineskip%")
            baselineskip = float(document.body[i][beg + 7:end])
            # we store the value in percent, thus divide by 100
            baselineskip = baselineskip/100
            baselineskip = str(baselineskip)
            # check if it is the starred version
            if document.body[i].find('*') != -1:
                star = '*'
            else:
                star = ''
            # now output TeX code
            endInset = find_end_of_inset(document.body, i)
            if endInset == -1:
                document.warning("Malformed LyX document: Missing '\\end_inset' of VSpace inset.")
                return
            else:
                document.body[vspaceLine: endInset + 1] = put_cmd_in_ert("\\vspace" + star + '{' + baselineskip + "\\baselineskip}")
        # the \length line of a space inset follows the \begin_inset line
        hspaceLine = find_token(document.body, "\\begin_inset space \\hspace", i - 1)
        if hspaceLine == i - 1:
            # output space inset as TeX code
            # first read out the values ("\length " is 8 characters; the
            # original used beg + 7, which only worked because float()
            # tolerates the leading blank)
            beg = document.body[i].rfind("\\length ")
            end = document.body[i].rfind("baselineskip%")
            baselineskip = float(document.body[i][beg + 8:end])
            # we store the value in percent, thus divide by 100
            baselineskip = baselineskip/100
            baselineskip = str(baselineskip)
            # check if it is the starred version
            if document.body[i-1].find('*') != -1:
                star = '*'
            else:
                star = ''
            # now output TeX code
            endInset = find_end_of_inset(document.body, i)
            if endInset == -1:
                document.warning("Malformed LyX document: Missing '\\end_inset' of space inset.")
                return
            else:
                document.body[hspaceLine: endInset + 1] = put_cmd_in_ert("\\hspace" + star + '{' + baselineskip + "\\baselineskip}")

        i = i + 1
+
+
def revert_rotfloat(document):
    " Revert placement options for rotated floats "
    i = 0
    j = 0
    k = 0
    while True:
        i = find_token(document.body, "sideways true", i)
        if i != -1:
            # a sideways float with placement options has the placement line
            # exactly two lines above "sideways true"
            regexp = re.compile(r'^.*placement.*$')
            j = find_re(document.body, regexp, i-2)
            if j == -1:
                # NOTE(review): returning here stops processing of all
                # later sideways floats too — confirm this is intended.
                return
            if j != i-2:
                i = i + 1
                continue
        else:
            return
        # we found a sideways float with placement options
        # at first store the placement
        beg = document.body[i-2].rfind(" ");
        placement = document.body[i-2][beg+1:]
        # check if the option'H' is used
        if placement.find("H") != -1:
            # 'H' placement requires the float package
            add_to_preamble(document, ["\\usepackage{float}"])
        # now check if it is a starred type
        if document.body[i-1].find("wide true") != -1:
            star = '*'
        else:
            star = ''
        # store the float type
        beg = document.body[i-3].rfind(" ");
        fType = document.body[i-3][beg+1:]
        # now output TeX code
        endInset = find_end_of_inset(document.body, i-3)
        if endInset == -1:
            document.warning("Malformed LyX document: Missing '\\end_inset' of Float inset.")
            return
        else:
            # replace the inset frame with \begin{sideways...}/\end{sideways...} ERT
            document.body[endInset-2: endInset+1] = put_cmd_in_ert("\\end{sideways" + fType + star + '}')
            document.body[i-3: i+2] = put_cmd_in_ert("\\begin{sideways" + fType + star + "}[" + placement + ']')
            add_to_preamble(document, ["\\usepackage{rotfloat}"])

        i = i + 1
+
+
def convert_allowbreak(document):
    " Zero widths Space-inset -> \SpecialChar allowbreak. "
    # Join the body into one string so the multi-line inset can be
    # matched and replaced in a single pass.
    zws_inset = ("\\begin_inset space \\hspace{}\n"
                 "\\length 0dd\n"
                 "\\end_inset\n\n")
    text = "\n".join(document.body)
    text = text.replace(zws_inset, "\\SpecialChar allowbreak\n")
    document.body = text.split("\n")
+
+
def revert_allowbreak(document):
    " \SpecialChar allowbreak -> Zero widths Space-inset. "
    # Expand each allowbreak special char back into the multi-line
    # zero-width space inset, working on the joined body text.
    zws_inset = ("\n\\begin_inset space \\hspace{}\n"
                 "\\length 0dd\n"
                 "\\end_inset\n\n")
    text = "\n".join(document.body)
    text = text.replace("\\SpecialChar allowbreak\n", zws_inset)
    document.body = text.split("\n")
+
+
def convert_mathnumberpos(document):
    " add the \\math_number_before tag "
    # check if the document uses the class option "leqno"
    # k: insertion point for the new tag (before \quotes_style)
    k = find_token(document.header, "\\quotes_style", 0)
    m = find_token(document.header, "\\options", 0)
    regexp = re.compile(r'^.*leqno.*')
    i = find_re(document.header, regexp, 0)
    # only act when "leqno" occurs in the \options line itself (i == m)
    if i != -1 and i == m:
        document.header.insert(k, "\\math_number_before 1")
        # delete the found option
        # (assumes \options precedes \quotes_style, so the insert at k
        # did not shift index i)
        document.header[i] = document.header[i].replace(",leqno", "")
        document.header[i] = document.header[i].replace(", leqno", "")
        document.header[i] = document.header[i].replace("leqno,", "")
        j = find_re(document.header, regexp, 0)
        if i == j:
            # then we have leqno as the only option
            del document.header[i]
    else:
        document.header.insert(k, "\\math_number_before 0")
+
+
def revert_mathnumberpos(document):
    " add the document class option leqno"
    regexp = re.compile(r'(\\math_number_before 1)')
    i = find_re(document.header, regexp, 0)
    if i == -1:
        # numbers are not before: just drop the tag
        regexp = re.compile(r'(\\math_number_before)')
        j = find_re(document.header, regexp, 0)
        # NOTE(review): if the tag is missing entirely, j is -1 and this
        # deletes the *last* header line — verify the tag is always present.
        del document.header[j]
    else:
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            # prepend leqno to the existing class options
            document.header[k] = document.header[k].replace("\\options", "\\options leqno,")
            del document.header[i]
        else:
            # no \options line yet: create one before \use_default_options
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l, "\\options leqno")
            # the insert above shifted the tag down by one line
            del document.header[i + 1]
+
+
def convert_mathnumberingname(document):
    " rename the \\math_number_before tag to \\math_numbering_side "
    # 1 -> left, 0 -> default
    regexp = re.compile(r'(\\math_number_before 1)')
    i = find_re(document.header, regexp, 0)
    if i != -1:
        document.header[i] = "\\math_numbering_side left"
    regexp = re.compile(r'(\\math_number_before 0)')
    i = find_re(document.header, regexp, 0)
    if i != -1:
        document.header[i] = "\\math_numbering_side default"
    # check if the document uses the class option "reqno"
    k = find_token(document.header, "\\math_numbering_side", 0)
    m = find_token(document.header, "\\options", 0)
    regexp = re.compile(r'^.*reqno.*')
    i = find_re(document.header, regexp, 0)
    # only act when "reqno" occurs in the \options line itself (i == m)
    if i != -1 and i == m:
        document.header[k] = "\\math_numbering_side right"
        # delete the found option
        document.header[i] = document.header[i].replace(",reqno", "")
        document.header[i] = document.header[i].replace(", reqno", "")
        document.header[i] = document.header[i].replace("reqno,", "")
        j = find_re(document.header, regexp, 0)
        if i == j:
            # then we have reqno as the only option
            del document.header[i]
+
+
def revert_mathnumberingname(document):
    " rename the \\math_numbering_side tag back to \\math_number_before "
    # just rename: left -> 1
    regexp = re.compile(r'(\\math_numbering_side left)')
    i = find_re(document.header, regexp, 0)
    if i != -1:
        document.header[i] = "\\math_number_before 1"
    # add the option reqno and delete the tag
    regexp = re.compile(r'(\\math_numbering_side right)')
    i = find_re(document.header, regexp, 0)
    if i != -1:
        # "right" has no old tag equivalent: fall back to 0 plus the class
        # option reqno
        document.header[i] = "\\math_number_before 0"
        k = find_token(document.header, "\\options", 0)
        if k != -1:
            document.header[k] = document.header[k].replace("\\options", "\\options reqno,")
        else:
            # no \options line yet: create one before \use_default_options
            l = find_token(document.header, "\\use_default_options", 0)
            document.header.insert(l, "\\options reqno")
    # add the math_number_before tag: default -> 0
    regexp = re.compile(r'(\\math_numbering_side default)')
    i = find_re(document.header, regexp, 0)
    if i != -1:
        document.header[i] = "\\math_number_before 0"
+
+
def convert_minted(document):
    " add the \\use_minted tag "
    # splice the tag in just before the final header line; minted is
    # disabled by default
    document.header[-1:-1] = ["\\use_minted 0"]
+
+
def revert_minted(document):
    " remove the \\use_minted tag "
    pos = find_token(document.header, "\\use_minted", 0)
    if pos != -1:
        del document.header[pos]
+
+
##
# Conversion hub
#

# LyX versions whose files this module produces/accepts.
supported_versions = ["2.3.0", "2.3"]
# Forward chain: each entry is [target file format number, [functions run
# to bring a document from the previous format to that format]].
convert = [
           [509, [convert_microtype]],
           [510, [convert_dateinset]],
           [511, [convert_ibranches]],
           [512, [convert_beamer_article_styles]],
           [513, []],
           [514, []],
           [515, []],
           [516, [convert_inputenc]],
           [517, []],
           [518, [convert_iopart]],
           [519, [convert_quotestyle]],
           [520, []],
           [521, [convert_frenchquotes]],
           [522, []],
           [523, []],
           [524, []],
           [525, []],
           [526, []],
           [527, []],
           [528, []],
           [529, []],
           [530, []],
           [531, []],
           [532, [convert_literalparam]],
           [533, []],
           [534, []],
           [535, [convert_dashligatures]],
           [536, []],
           [537, []],
           [538, [convert_mathindent]],
           [539, []],
           [540, []],
           [541, [convert_allowbreak]],
           [542, [convert_mathnumberpos]],
           [543, [convert_mathnumberingname]],
           [544, [convert_minted]]
          ]

# Backward chain: each entry is [target file format number, [functions run
# to revert a document from the following format back to that format]].
revert =  [
           [543, [revert_minted]],
           [542, [revert_mathnumberingname]],
           [541, [revert_mathnumberpos]],
           [540, [revert_allowbreak]],
           [539, [revert_rotfloat]],
           [538, [revert_baselineskip]],
           [537, [revert_mathindent]],
           [536, [revert_xout]],
           [535, [revert_noto]],
           [534, [revert_dashligatures]],
           [533, [revert_chapterbib]],
           [532, [revert_multibib]],
           [531, [revert_literalparam]],
           [530, [revert_qualicites]],
           [529, [revert_bibpackopts]],
           [528, [revert_citekeyonly]],
           [527, [revert_biblatex]],
           [526, [revert_noprefix]],
           [525, [revert_plural_refs]],
           [524, [revert_labelonly]],
           [523, [revert_crimson, revert_cochinealmath]],
           [522, [revert_cjkquotes]],
           [521, [revert_dynamicquotes]],
           [520, [revert_britishquotes, revert_swedishgquotes, revert_frenchquotes, revert_frenchinquotes, revert_russianquotes, revert_swissquotes]],
           [519, [revert_plainquote]],
           [518, [revert_quotestyle]],
           [517, [revert_iopart]],
           [516, [revert_quotes]],
           [515, []],
           [514, [revert_urdu, revert_syriac]],
           [513, [revert_amharic, revert_asturian, revert_kannada, revert_khmer]],
           [512, [revert_bosnian, revert_friulan, revert_macedonian, revert_piedmontese, revert_romansh]],
           [511, [revert_beamer_article_styles]],
           [510, [revert_ibranches]],
           [509, []],
           [508, [revert_microtype]]
          ]
+
+
if __name__ == "__main__":
    # This module only defines conversion routines; it is driven by lyx2lyx
    # and does nothing useful when executed directly.
    pass
and is what is returned if we do not find anything. So you
can use that to set a default.
-get_quoted_value(lines, token, start[, end[, default]):
+get_quoted_value(lines, token, start[, end[, default]]):
Similar to get_value, but it will strip quotes off the
value, if they are present. So use this one for cases
where the value is normally quoted.
option="value"
and returns value. Returns "" if not found.
+get_bool_value(lines, token, start[, end[, default]]):
+ Like get_value, but returns a boolean.
+
del_token(lines, token, start[, end]):
Like find_token, but deletes the line if it finds one.
Returns True if a line got deleted, otherwise False.
return val.strip('"')
def get_bool_value(lines, token, start, end = 0, default = None):
    """ get_bool_value(lines, token, start[, end[, default]]) -> bool

    Find the next line that looks like:
      token bool_value

    Returns True if bool_value is 1 or true,
    False if bool_value is 0 or false,
    and `default` for any other (or missing) value.
    """

    val = get_quoted_value(lines, token, start, end, "")

    if val == "1" or val == "true":
        return True
    if val == "0" or val == "false":
        return False
    return default
+
+
def get_option_value(line, option):
rx = option + '\s*=\s*"([^"]+)"'
rx = re.compile(rx)
def test_check_token(self):
line = "\\begin_layout Standard"
- self.assertEquals(check_token(line, '\\begin_layout'), True)
- self.assertEquals(check_token(line, 'Standard'), False)
+ self.assertEqual(check_token(line, '\\begin_layout'), True)
+ self.assertEqual(check_token(line, 'Standard'), False)
def test_is_nonempty_line(self):
- self.assertEquals(is_nonempty_line(lines[0]), False)
- self.assertEquals(is_nonempty_line(lines[1]), True)
- self.assertEquals(is_nonempty_line(" "*5), False)
+ self.assertEqual(is_nonempty_line(lines[0]), False)
+ self.assertEqual(is_nonempty_line(lines[1]), True)
+ self.assertEqual(is_nonempty_line(" "*5), False)
def test_find_token(self):
- self.assertEquals(find_token(lines, '\\emph', 0), 7)
- self.assertEquals(find_token(lines, '\\emph', 0, 5), -1)
- self.assertEquals(find_token(lines, '\\emp', 0, 0, True), -1)
- self.assertEquals(find_token(lines, '\\emp', 0, 0, False), 7)
- self.assertEquals(find_token(lines, 'emph', 0), -1)
+ self.assertEqual(find_token(lines, '\\emph', 0), 7)
+ self.assertEqual(find_token(lines, '\\emph', 0, 5), -1)
+ self.assertEqual(find_token(lines, '\\emp', 0, 0, True), -1)
+ self.assertEqual(find_token(lines, '\\emp', 0, 0, False), 7)
+ self.assertEqual(find_token(lines, 'emph', 0), -1)
def test_find_tokens(self):
tokens = ['\\emph', '\\end_inset']
- self.assertEquals(find_tokens(lines, tokens, 0), 4)
- self.assertEquals(find_tokens(lines, tokens, 0, 4), -1)
+ self.assertEqual(find_tokens(lines, tokens, 0), 4)
+ self.assertEqual(find_tokens(lines, tokens, 0, 4), -1)
if __name__ == '__main__':
def read_unicodesymbols():
" Read the unicodesymbols list of unicode characters and corresponding commands."
pathname = os.path.abspath(os.path.dirname(sys.argv[0]))
- fp = open(os.path.join(pathname.strip('lyx2lyx'), 'unicodesymbols'))
+ filename = os.path.join(pathname.strip('lyx2lyx'), 'unicodesymbols')
+
+ # For python 3+ we have to specify the encoding for those systems
+ # where the default is not UTF-8
+ fp = open(filename, encoding="utf8") if (not PY2) else open(filename)
+
spec_chars = []
# A backslash, followed by some non-word character, and then a character
# in brackets. The idea is to check for constructs like: \"{u}, which is how