# Uncomment only what you need to import, please.
-from parser_tools import del_token, find_token, find_end_of, find_end_of_inset, \
- find_re, get_option_value, get_value, get_quoted_value, set_option_value
+from parser_tools import del_token, find_token, find_token_backwards, find_end_of, \
+ find_end_of_inset, find_end_of_layout, find_re, get_option_value, get_containing_layout, \
+ get_value, get_quoted_value, set_option_value
#from parser_tools import find_token, find_end_of, find_tokens, \
#find_token_exact, find_end_of_inset, find_end_of_layout, \
- #find_token_backwards, is_in_inset, get_value, get_quoted_value, \
- #del_token, check_token
+ #is_in_inset, del_token, check_token
-from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert
+from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert, get_ert
#from lyx2lyx_tools import insert_to_preamble, \
-# put_cmd_in_ert, lyx2latex, latex_length, revert_flex_inset, \
+# lyx2latex, latex_length, revert_flex_inset, \
# revert_font_attrs, hex2ratio, str2bool
####################################################################
"Set English language variants Australian and Newzealand to English"
if document.language == "australian" or document.language == "newzealand":
- document.language = "english"
+ document.language = "english"
i = find_token(document.header, "\\language", 0)
if i != -1:
document.header[i] = "\\language english"
-
j = 0
while True:
j = find_token(document.body, "\\lang australian", j)
- if j == -1:
+ if j == -1:
j = find_token(document.body, "\\lang newzealand", 0)
if j == -1:
return
"use_xxx yyy => use_package xxx yyy"
packages = ["amsmath", "esint", "mathdots", "mhchem", "undertilde"]
for p in packages:
- i = find_token(document.header, "\\use_%s" % p , 0)
+ i = find_token(document.header, "\\use_%s" % p, 0)
if i != -1:
- value = get_value(document.header, "\\use_%s" % p , i)
+ value = get_value(document.header, "\\use_%s" % p, i)
document.header[i] = "\\use_package %s %s" % (p, value)
def revert_use_packages(document):
    """use_package xxx yyy => use_xxx yyy

    Rewrites the 2.1 '\\use_package <name> <value>' header lines back to the
    2.0 '\\use_<name> <value>' form.  The order of \\use_package lines is
    arbitrary and not every package needs to be present, but the old format
    requires a complete list in a fixed order (important for older LyX
    versions and especially for lyx2lyx), so missing packages default to auto.
    """
    packages = ["amsmath", "esint", "mathdots", "mhchem", "undertilde"]
    # j is the header position where the next \use_xxx line is inserted; it is
    # re-anchored to the position of each \use_package line that is found.
    j = 0
    for p in packages:
        regexp = re.compile(r'(\\use_package\s+%s)' % p)
        i = find_re(document.header, regexp, j)
        value = "1"  # default is auto; also used when the package line is absent
        if i != -1:
            # Query with the bare "\use_package" token so get_value() returns
            # "<name> <value>"; the value is then the second word.  (Querying
            # with the package name included would return only "<value>" and
            # make split()[1] raise IndexError.)
            value = get_value(document.header, "\\use_package", i).split()[1]
            del document.header[i]
            j = i
        document.header.insert(j, "\\use_%s %s" % (p, value))
        j = j + 1
# We are looking for the natbib citation engine
- i = find_token(document.header, "\\cite_engine natbib", i)
+ i = find_token(document.header, "\\cite_engine natbib", 0)
if i == -1:
return
document.header[i] = "\\cite_engine natbib_" + engine_type
def revert_cancel(document):
    "add cancel to the preamble if necessary"
    commands = ["cancelto", "cancel", "bcancel", "xcancel"]
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Formula', pos)
        if pos == -1:
            return
        end = find_end_of_inset(document.body, pos)
        if end == -1:
            document.warning("Malformed LyX document: Can't find end of Formula inset at line " + str(pos))
            pos += 1
            continue
        # Scan the formula source for any of the cancel commands; one hit is
        # enough, since the preamble line is only needed once per document.
        formula = "\n".join(document.body[pos:end])
        if any("\\%s" % command in formula for command in commands):
            add_to_preamble(document, ["\\usepackage{cancel}"])
            return
        pos = end
+
+
def revert_verbatim(document):
    """ Revert verbatim environments completely to TeX-code.

    Each run of 'Verbatim' layouts is rewritten as a Standard paragraph
    containing an ERT inset with \\begin{verbatim}...\\end{verbatim}.
    The slice offsets below (j+19, k == j+2) are tied to the exact lengths
    of subst_begin/subst_end — do not reorder these lists.
    """
    i = 0
    # True while we are inside a run of consecutive Verbatim paragraphs that
    # have already been merged into one open ERT inset.
    consecutive = False
    # Closes the ERT inset and the surrounding Standard paragraph.
    # NOTE: '\end_layout' (single backslash) is deliberate/historic here —
    # Python leaves the unrecognized escape '\e' intact, so the runtime
    # string is identical to '\\end_layout'.
    subst_end = ['\end_layout', '', '\\begin_layout Plain Layout',
                 '\end_layout', '',
                 '\\begin_layout Plain Layout', '', '',
                 '\\backslash', '',
                 'end{verbatim}',
                 '\\end_layout', '', '\\end_inset',
                 '', '', '\\end_layout']
    # Opens the Standard paragraph and the ERT inset with \begin{verbatim}.
    subst_begin = ['\\begin_layout Standard', '\\noindent',
                   '\\begin_inset ERT', 'status collapsed', '',
                   '\\begin_layout Plain Layout', '', '', '\\backslash',
                   'begin{verbatim}',
                   '\\end_layout', '', '\\begin_layout Plain Layout', '']
    while 1:
        i = find_token(document.body, "\\begin_layout Verbatim", i)
        if i == -1:
            return
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed lyx document: Can't find end of Verbatim layout")
            i += 1
            continue
        # delete all line breaks insets (there are no other insets)
        l = i
        while 1:
            n = find_token(document.body, "\\begin_inset Newline newline", l)
            if n == -1:
                n = find_token(document.body, "\\begin_inset Newline linebreak", l)
                if n == -1:
                    break
            m = find_end_of_inset(document.body, n)
            del(document.body[m:m+1])
            # replace the one-line inset start by a paragraph break; the net
            # change is +2 lines, hence the j += 1 below after the l += 1
            document.body[n:n+1] = ['\end_layout', '', '\\begin_layout Plain Layout']
            l += 1
            j += 1
        # consecutive verbatim environments need to be connected
        k = find_token(document.body, "\\begin_layout Verbatim", j)
        if k == j + 2 and consecutive == False:
            # first paragraph of a run: open the ERT and keep it open
            consecutive = True
            document.body[j:j+1] = ['\end_layout', '', '\\begin_layout Plain Layout']
            document.body[i:i+1] = subst_begin
            continue
        if k == j + 2 and consecutive == True:
            # middle paragraph of a run: just join into the open ERT
            document.body[j:j+1] = ['\end_layout', '', '\\begin_layout Plain Layout']
            del(document.body[i:i+1])
            continue
        if k != j + 2 and consecutive == True:
            # last paragraph of a run: close the ERT
            document.body[j:j+1] = subst_end
            # the next paragraph must not be indented
            document.body[j+19:j+19] = ['\\noindent']
            del(document.body[i:i+1])
            consecutive = False
            continue
        else:
            # isolated Verbatim paragraph: open and close in one go
            document.body[j:j+1] = subst_end
            # the next paragraph must not be indented
            document.body[j+19:j+19] = ['\\noindent']
            document.body[i:i+1] = subst_begin
+
+
def revert_tipa(document):
    """ Revert native TIPA insets to mathed or ERT.

    Single-paragraph IPA insets become \\text{\\textipa{...}} inside a
    Formula inset; multi-paragraph ones are wrapped in an ERT
    \\begin{IPA}...\\end{IPA} pair and need the tipa/tipx packages.
    """
    i = 0
    while 1:
        i = find_token(document.body, "\\begin_inset IPA", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed lyx document: Can't find end of IPA inset")
            i += 1
            continue
        Multipar = False
        n = find_token(document.body, "\\begin_layout", i, j)
        if n == -1:
            document.warning("Malformed lyx document: IPA inset has no embedded layout")
            i += 1
            continue
        m = find_end_of_layout(document.body, n)
        if m == -1:
            document.warning("Malformed lyx document: Can't find end of embedded layout")
            i += 1
            continue
        # content of the first (and possibly only) paragraph
        content = document.body[n+1:m]
        # a second \begin_layout before the inset end means multiple paragraphs
        p = find_token(document.body, "\\begin_layout", m, j)
        if p != -1 or len(content) > 1:
            Multipar = True
            # for the multi-par case take the raw inset content instead
            content = document.body[i+1:j]
        if Multipar:
            # IPA insets with multiple pars need to be wrapped by \begin{IPA}...\end{IPA}
            document.body[i:j+1] = ['\\end_layout', '', '\\begin_layout Standard'] + put_cmd_in_ert("\\begin{IPA}") + ['\\end_layout'] + content + ['\\begin_layout Standard'] + put_cmd_in_ert("\\end{IPA}")
            add_to_preamble(document, ["\\usepackage{tipa,tipx}"])
        else:
            # single-par IPA insets can be reverted to mathed
            document.body[i:j+1] = ["\\begin_inset Formula $\\text{\\textipa{" + content[0] + "}}$", "\\end_inset"]
        i = j
+
+
def revert_cell_rotation(document):
    """Revert cell rotations to TeX-code

    rotate="0" is dropped, rotate="90" becomes the old boolean
    rotate="true", and any other angle is removed from the cell tag and
    emitted as an ERT \\begin{turn}{<angle>}...\\end{turn} pair, which
    requires the rotating package.
    """
    load_rotating = False
    i = 0
    # The pattern includes the leading blank so removing the attribute does
    # not leave a double space in the <cell ...> tag.
    rgx = re.compile(r' rotate="[^"]+?"')
    try:
        while True:
            # first, let's find out if we need to do anything
            i = find_token(document.body, '<cell ', i)
            if i == -1:
                return
            j = document.body[i].find('rotate="')
            if j != -1:
                k = document.body[i].find('"', j + 8)
                value = document.body[i][j + 8 : k]
                if value == "0":
                    # remove rotate option
                    document.body[i] = rgx.sub('', document.body[i])
                elif value == "90":
                    # keep the leading blank in the replacement, otherwise
                    # the attribute is glued to its predecessor
                    document.body[i] = rgx.sub(' rotate="true"', document.body[i])
                else:
                    load_rotating = True
                    # remove rotate option
                    document.body[i] = rgx.sub('', document.body[i])
                    # write ERT (end first so the start offset stays valid)
                    document.body[i + 5 : i + 5] = \
                      put_cmd_in_ert("\\end{turn}")
                    document.body[i + 4 : i + 4] = \
                      put_cmd_in_ert("\\begin{turn}{" + value + "}")

            i += 1

    finally:
        if load_rotating:
            # double the backslash: a literal '\u' is a SyntaxError on Python 3
            add_to_preamble(document, ["\\@ifundefined{turnbox}{\\usepackage{rotating}}{}"])
+
+
def convert_cell_rotation(document):
    'Convert cell rotation statements from "true" to "90"'

    # hoisted: the same pattern is used for every matching cell tag
    pattern = re.compile(r'rotate="[^"]+?"')
    pos = 0
    while True:
        # first, let's find out if we need to do anything
        pos = find_token(document.body, '<cell ', pos)
        if pos == -1:
            return
        line = document.body[pos]
        if line.find('rotate="true"') != -1:
            # convert "true" to "90"
            document.body[pos] = pattern.sub('rotate="90"', line)
        pos += 1
+
+
def revert_table_rotation(document):
    """Revert table rotations to TeX-code

    Like revert_cell_rotation(), but for the whole-table rotate attribute on
    the <features ...> line: "0" is dropped, "90" becomes the old boolean
    rotate="true", and any other angle is wrapped in an ERT
    \\begin{turn}{<angle>}...\\end{turn} pair around the table.
    """
    load_rotating = False
    i = 0
    try:
        while True:
            # first, let's find out if we need to do anything
            i = find_token(document.body, '<features ', i)
            if i == -1:
                return
            j = document.body[i].find('rotate="')
            if j != -1:
                # search for the end of *this* table, i.e. from the features
                # line onwards (j is a character offset, not a line index)
                end_table = find_token(document.body, '</lyxtabular>', i)
                k = document.body[i].find('"', j + 8)
                value = document.body[i][j + 8 : k]
                if value == "0":
                    rgx = re.compile(r' rotate="[^"]+?"')
                    # remove rotate option
                    document.body[i] = rgx.sub('', document.body[i])
                elif value == "90":
                    rgx = re.compile(r'rotate="[^"]+?"')
                    document.body[i] = rgx.sub('rotate="true"', document.body[i])
                else:
                    rgx = re.compile(r' rotate="[^"]+?"')
                    load_rotating = True
                    # remove rotate option
                    document.body[i] = rgx.sub('', document.body[i])
                    # write ERT (after the table first, then before it)
                    document.body[end_table + 3 : end_table + 3] = \
                      put_cmd_in_ert("\\end{turn}")
                    document.body[i - 2 : i - 2] = \
                      put_cmd_in_ert("\\begin{turn}{" + value + "}")

            i += 1

    finally:
        if load_rotating:
            # double the backslash: a literal '\u' is a SyntaxError on Python 3
            add_to_preamble(document, ["\\@ifundefined{turnbox}{\\usepackage{rotating}}{}"])
+
+
def convert_table_rotation(document):
    'Convert table rotation statements from "true" to "90"'

    # hoisted: the same pattern is used for every matching features line
    pattern = re.compile(r'rotate="[^"]+?"')
    pos = 0
    while True:
        # first, let's find out if we need to do anything
        pos = find_token(document.body, '<features ', pos)
        if pos == -1:
            return
        line = document.body[pos]
        if line.find('rotate="true"') != -1:
            # convert "true" to "90"
            document.body[pos] = pattern.sub('rotate="90"', line)
        pos += 1
+
+
def convert_listoflistings(document):
    'Convert ERT \lstlistoflistings to TOC lstlistoflistings inset'
    # We can support roundtrip because the command is so simple
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset ERT", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed lyx document: Can't find end of ERT inset")
            i += 1
            continue
        ert = get_ert(document.body, i)
        if ert == "\\lstlistoflistings{}":
            # replace the ERT body (the inset's own \end_inset at j survives)
            document.body[i:j] = ["\\begin_inset CommandInset toc", "LatexCommand lstlistoflistings", ""]
            # skip the 3 inserted lines plus the closing \end_inset
            i = i + 4
        else:
            i = j + 1
+
+
def revert_listoflistings(document):
    'Convert TOC lstlistoflistings inset to ERT lstlistoflistings'
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset CommandInset toc", pos)
        if pos == -1:
            return
        # only TOC insets of the lstlistoflistings flavour are affected
        if document.body[pos+1] == "LatexCommand lstlistoflistings":
            end = find_end_of_inset(document.body, pos)
            if end == -1:
                document.warning("Malformed lyx document: Can't find end of TOC inset")
                pos += 1
                continue
            document.body[pos:end+1] = put_cmd_in_ert("\\lstlistoflistings{}")
            add_to_preamble(document, ["\\usepackage{listings}"])
        pos = pos + 1
+
+
def convert_use_amssymb(document):
    """insert use_package amssymb

    The new \\use_package amssymb header line inherits the amsmath setting,
    unless the user already loaded amssymb manually in the preamble — then
    it is forced on ("2") and the preamble line is removed.
    """
    regexp = re.compile(r'(\\use_package\s+amsmath)')
    i = find_re(document.header, regexp, 0)
    if i == -1:
        document.warning("Malformed LyX document: Can't find \\use_package amsmath.")
        return
    value = get_value(document.header, "\\use_package", i).split()[1]
    useamsmath = 0
    try:
        useamsmath = int(value)
    except ValueError:
        # only int() can fail here; a bare except would hide real bugs
        document.warning("Invalid \\use_package amsmath: " + value + ". Assuming auto.")
        useamsmath = 1
    j = find_token(document.preamble, "\\usepackage{amssymb}", 0)
    if j == -1:
        document.header.insert(i + 1, "\\use_package amssymb %d" % useamsmath)
    else:
        document.header.insert(i + 1, "\\use_package amssymb 2")
        del document.preamble[j]
+
+
def revert_use_amssymb(document):
    "remove use_package amssymb"
    rx_amsmath = re.compile(r'(\\use_package\s+amsmath)')
    rx_amssymb = re.compile(r'(\\use_package\s+amssymb)')
    i = find_re(document.header, rx_amsmath, 0)
    j = find_re(document.header, rx_amssymb, 0)
    # default for both packages is auto
    value1 = "1"
    value2 = "1"
    if i != -1:
        value1 = get_value(document.header, "\\use_package" , i).split()[1]
    if j != -1:
        value2 = get_value(document.header, "\\use_package" , j).split()[1]
        del document.header[j]
    # emit the preamble line only when amssymb was explicitly switched on
    # and amsmath does not already imply it
    if value1 != value2 and value2 == "2": # on
        add_to_preamble(document, ["\\usepackage{amssymb}"])
+
+
def revert_ancientgreek(document):
    "Set the document language for ancientgreek to greek"

    if document.language == "ancientgreek":
        document.language = "greek"
        pos = find_token(document.header, "\\language", 0)
        if pos != -1:
            document.header[pos] = "\\language greek"
    # downgrade every inline language switch as well
    pos = 0
    while True:
        pos = find_token(document.body, "\\lang ancientgreek", pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace("\\lang ancientgreek", "\\lang greek")
        pos += 1
+
+
def revert_languages(document):
    """Set the document language for new supported languages to English

    Both the document language (header) and any inline \\lang switches in
    the body are downgraded, since older LyX versions know none of these
    languages.
    """
    languages = [
        "coptic", "divehi", "hindi", "kurmanji", "lao", "marathi", "occitan", "sanskrit",
        "syriac", "tamil", "telugu", "urdu"
    ]
    # iterate the languages directly instead of the C-style range(len(...))
    for lang in languages:
        if document.language == lang:
            document.language = "english"
            i = find_token(document.header, "\\language", 0)
            if i != -1:
                document.header[i] = "\\language english"
        j = 0
        while True:
            j = find_token(document.body, "\\lang " + lang, j)
            if j == -1:
                break
            document.body[j] = document.body[j].replace("\\lang " + lang, "\\lang english")
            j += 1
+
+
def convert_armenian(document):
    "Use polyglossia and thus non-TeX fonts for Armenian"

    # only relevant for Armenian documents
    if document.language != "armenian":
        return
    pos = find_token(document.header, "\\use_non_tex_fonts", 0)
    if pos != -1:
        document.header[pos] = "\\use_non_tex_fonts true"
+
+
def revert_armenian(document):
    "Use ArmTeX and thus TeX fonts for Armenian"

    # only relevant for Armenian documents
    if document.language != "armenian":
        return
    pos = find_token(document.header, "\\use_non_tex_fonts", 0)
    if pos != -1:
        document.header[pos] = "\\use_non_tex_fonts false"
+
+
def revert_libertine(document):
    " Revert native libertine font definition to LaTeX "

    # the native setting only matters when TeX fonts are in use
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    i = find_token(document.header, "\\font_roman libertine", 0)
    if i == -1:
        return
    j = find_token(document.header, "\\font_osf true", 0)
    if j != -1:
        # old-style figures are the package default; just reset the flag
        document.header[j] = "\\font_osf false"
        preamble = "\\usepackage{libertine-type1}"
    else:
        preamble = "\\usepackage[lining]{libertine-type1}"
    add_to_preamble(document, [preamble])
    document.header[i] = "\\font_roman default"
+
+
def revert_txtt(document):
    " Revert native txtt font definition to LaTeX "

    # the native setting only matters when TeX fonts are in use
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    i = find_token(document.header, "\\font_typewriter txtt", 0)
    if i == -1:
        return
    add_to_preamble(document, ["\\renewcommand{\\ttdefault}{txtt}"])
    document.header[i] = "\\font_typewriter default"
+
+
def revert_mathdesign(document):
    """ Revert native mathdesign font definition to LaTeX

    Maps the mathdesign roman font names to the package's family options
    and re-creates the \\usepackage[...]{mathdesign} preamble line; the
    osf/sc header flags are folded into the package's "expert" option.
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # LyX font name -> mathdesign family option
        mathdesign_dict = {
            "mdbch": "charter",
            "mdput": "utopia",
            "mdugm": "garamond"
        }
        i = find_token(document.header, "\\font_roman", 0)
        if i == -1:
            return
        val = get_value(document.header, "\\font_roman", i)
        if val in mathdesign_dict.keys():
            preamble = "\\usepackage[%s" % mathdesign_dict[val]
            expert = False
            # old style figures and true small caps both require expert mode
            j = find_token(document.header, "\\font_osf true", 0)
            if j != -1:
                expert = True
                document.header[j] = "\\font_osf false"
            l = find_token(document.header, "\\font_sc true", 0)
            if l != -1:
                expert = True
                document.header[l] = "\\font_sc false"
            if expert:
                preamble += ",expert"
            preamble += "]{mathdesign}"
            add_to_preamble(document, [preamble])
            document.header[i] = "\\font_roman default"
+
+
def revert_texgyre(document):
    """ Revert native TeXGyre font definition to LaTeX

    Any of the TeXGyre families selected as roman, sans or typewriter font
    is reverted to a plain \\usepackage{<family>} preamble line plus the
    'default' font setting in the header.
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        texgyre_fonts = ["tgadventor", "tgbonum", "tgchorus", "tgcursor", \
                         "tgheros", "tgpagella", "tgschola", "tgtermes"]
        # identical revert logic for all three font classes (previously three
        # copy-pasted blocks)
        for token in ["\\font_roman", "\\font_sans", "\\font_typewriter"]:
            i = find_token(document.header, token, 0)
            if i != -1:
                val = get_value(document.header, token, i)
                if val in texgyre_fonts:
                    preamble = "\\usepackage{%s}" % val
                    add_to_preamble(document, [preamble])
                    document.header[i] = "%s default" % token
+
+
def revert_ipadeco(document):
    """ Revert IPA decorations to ERT

    An IPADeco inset (toptiebar/bottomtiebar) is replaced by two ERT
    insets, '\\<decotype>{' before the content and '}' after it.
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset IPADeco", i)
        if i == -1:
            return
        end = find_end_of_inset(document.body, i)
        if end == -1:
            document.warning("Can't find end of inset at line " + str(i))
            i += 1
            continue
        line = document.body[i]
        rx = re.compile(r'\\begin_inset IPADeco (.*)$')
        m = rx.match(line)
        # NOTE(review): m is used unguarded — find_token guarantees the
        # prefix, so the regex always matches here.
        decotype = m.group(1)
        if decotype != "toptiebar" and decotype != "bottomtiebar":
            document.warning("Invalid IPADeco type: " + decotype)
            i = end
            continue
        blay = find_token(document.body, "\\begin_layout Plain Layout", i, end)
        if blay == -1:
            document.warning("Can't find layout for inset at line " + str(i))
            i = end
            continue
        bend = find_end_of_layout(document.body, blay)
        if bend == -1:
            document.warning("Malformed LyX document: Could not find end of IPADeco inset's layout.")
            i = end
            continue
        # opening ERT: \<decotype>{
        substi = ["\\begin_inset ERT", "status collapsed", "",
                  "\\begin_layout Plain Layout", "", "", "\\backslash",
                  decotype + "{", "\\end_layout", "", "\\end_inset"]
        # closing ERT: }
        substj = ["\\size default", "", "\\begin_inset ERT", "status collapsed", "",
                  "\\begin_layout Plain Layout", "", "}", "\\end_layout", "", "\\end_inset"]
        # do the later one first so as not to mess up the numbering
        document.body[bend:end + 1] = substj
        document.body[i:blay + 1] = substi
        # recompute the scan position from the net length change of both splices
        i = end + len(substi) + len(substj) - (end - bend) - (blay - i) - 2
        # NOTE(review): passes a plain string where other call sites pass a
        # list — add_to_preamble() apparently accepts both; confirm.
        add_to_preamble(document, "\\usepackage{tipa}")
+
+
def revert_ipachar(document):
    ' Revert \\IPAChar to ERT '
    # hoisted: one compile instead of a re.match per body line
    rx = re.compile(r'(.*)\\IPAChar \\(\w+\{\w+\})(.*)')
    pos = 0
    found = False
    while pos < len(document.body):
        mo = rx.match(document.body[pos])
        if not mo:
            pos = pos + 1
            continue
        found = True
        replacement = [mo.group(1),
                       '\\begin_inset ERT',
                       'status collapsed', '',
                       '\\begin_layout Standard',
                       '', '', '\\backslash',
                       mo.group(2),
                       '\\end_layout', '',
                       '\\end_inset', '',
                       mo.group(3)]
        document.body[pos: pos+1] = replacement
        # continue after the inserted lines
        pos = pos + len(replacement)
    if found:
        add_to_preamble(document, "\\usepackage{tone}")
+
+
def revert_minionpro(document):
    " Revert native MinionPro font definition to LaTeX "

    # the native setting only matters when TeX fonts are in use
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    i = find_token(document.header, "\\font_roman minionpro", 0)
    if i == -1:
        return
    j = find_token(document.header, "\\font_osf true", 0)
    if j != -1:
        # old-style figures are the package default; just reset the flag
        document.header[j] = "\\font_osf false"
        preamble = "\\usepackage{MinionPro}"
    else:
        preamble = "\\usepackage[lf]{MinionPro}"
    add_to_preamble(document, [preamble])
    document.header[i] = "\\font_roman default"
+
+
def revert_mathfonts(document):
    """ Revert native math font definitions to LaTeX

    The \\font_math header line is unknown to older versions and is always
    removed; with TeX fonts, 'eulervm' and the roman-font-dependent
    'default' setting are re-created as preamble code first.
    """
    i = find_token(document.header, "\\font_math", 0)
    if i == -1:
        return
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        val = get_value(document.header, "\\font_math", i)
        if val == "eulervm":
            add_to_preamble(document, "\\usepackage{eulervm}")
        elif val == "default":
            # roman font (optionally with osf) -> preamble code mimicking
            # "math font = default" behaviour
            mathfont_dict = {
                "lmodern": "\\renewcommand{\\rmdefault}{lmr}",
                "minionpro": "\\usepackage[onlytext,lf]{MinionPro}",
                "minionpro-osf": "\\usepackage[onlytext]{MinionPro}",
                "palatino": "\\renewcommand{\\rmdefault}{ppl}",
                "palatino-osf": "\\renewcommand{\\rmdefault}{pplj}",
                "times": "\\renewcommand{\\rmdefault}{ptm}",
                "utopia": "\\renewcommand{\\rmdefault}{futs}",
                "utopia-osf": "\\renewcommand{\\rmdefault}{futj}",
            }
            j = find_token(document.header, "\\font_roman", 0)
            if j != -1:
                rm = get_value(document.header, "\\font_roman", j)
                # k is only defined inside this branch; all later uses are
                # nested here as well
                k = find_token(document.header, "\\font_osf true", 0)
                if k != -1:
                    rm += "-osf"
                if rm in mathfont_dict.keys():
                    add_to_preamble(document, mathfont_dict[rm])
                    document.header[j] = "\\font_roman default"
                    if k != -1:
                        document.header[k] = "\\font_osf false"
    # drop the header line unconditionally (also for non-TeX fonts)
    del document.header[i]
+
+
def revert_mdnomath(document):
    """ Revert mathdesign and fourier without math

    The 2.1 'md-*' roman font names are mapped back to the plain mathdesign
    names; if math was set to default, the roman font is reverted to
    'default' plus a \\rmdefault preamble line instead.
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # 2.1 font name -> mathdesign family
        mathdesign_dict = {
            "md-charter": "mdbch",
            "md-utopia": "mdput",
            "md-garamond": "mdugm"
        }
        i = find_token(document.header, "\\font_roman", 0)
        if i == -1:
            return
        val = get_value(document.header, "\\font_roman", i)
        if val in mathdesign_dict.keys():
            j = find_token(document.header, "\\font_math", 0)
            if j == -1:
                # no \font_math line: keep the mathdesign font name and stop
                # (previously fell through and called get_value with j == -1,
                # only to rewrite the same value in the else branch)
                document.header[i] = "\\font_roman %s" % mathdesign_dict[val]
                return
            mval = get_value(document.header, "\\font_math", j)
            if mval == "default":
                document.header[i] = "\\font_roman default"
                add_to_preamble(document, "\\renewcommand{\\rmdefault}{%s}" % mathdesign_dict[val])
            else:
                document.header[i] = "\\font_roman %s" % mathdesign_dict[val]
+
+
def convert_mdnomath(document):
    " Change mathdesign font name "

    # the native setting only matters when TeX fonts are in use
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    # mathdesign family -> 2.1 font name
    mathdesign_dict = {
        "mdbch": "md-charter",
        "mdput": "md-utopia",
        "mdugm": "md-garamond"
    }
    pos = find_token(document.header, "\\font_roman", 0)
    if pos == -1:
        return
    family = get_value(document.header, "\\font_roman", pos)
    if family in mathdesign_dict.keys():
        document.header[pos] = "\\font_roman %s" % mathdesign_dict[family]
+
+
def revert_newtxmath(document):
    " Revert native newtxmath definitions to LaTeX "

    pos = find_token(document.header, "\\font_math", 0)
    if pos == -1:
        return
    # the native setting only matters when TeX fonts are in use
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    # math font name -> preamble line
    mathfont_dict = {
        "libertine-ntxm": "\\usepackage[libertine]{newtxmath}",
        "minion-ntxm": "\\usepackage[minion]{newtxmath}",
        "newtxmath": "\\usepackage{newtxmath}",
    }
    val = get_value(document.header, "\\font_math", pos)
    if val in mathfont_dict.keys():
        add_to_preamble(document, mathfont_dict[val])
        document.header[pos] = "\\font_math auto"
+
+
def revert_biolinum(document):
    " Revert native biolinum font definition to LaTeX "

    # the native setting only matters when TeX fonts are in use
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    i = find_token(document.header, "\\font_sans biolinum", 0)
    if i == -1:
        return
    has_osf = find_token(document.header, "\\font_osf true", 0) != -1
    if has_osf:
        preamble = "\\usepackage{biolinum-type1}"
    else:
        preamble = "\\usepackage[lf]{biolinum-type1}"
    add_to_preamble(document, [preamble])
    document.header[i] = "\\font_sans default"
+
+
def revert_uop(document):
    " Revert native URW Classico (Optima) font definition to LaTeX "

    # the native setting only matters when TeX fonts are in use
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    i = find_token(document.header, "\\font_sans uop", 0)
    if i == -1:
        return
    add_to_preamble(document, ["\\renewcommand{\\sfdefault}{uop}"])
    document.header[i] = "\\font_sans default"
+
+
def convert_latexargs(document):
    """ Convert InsetArgument to new syntax

    In the new syntax every Argument inset carries the argument's ordinal
    number.  Numbering is only done for known ("safe") layouts and modules;
    otherwise the insets are tagged with the placeholder id 999 and left
    for InsetArgument::updateBuffer() to fix.
    """
    if find_token(document.body, "\\begin_inset Argument", 0) == -1:
        # nothing to do.
        return

    # A list of layouts (document classes) with only optional or no arguments.
    # These can be safely converted to the new syntax
    # (I took the liberty to add some of my personal layouts/modules here; JSP)
    safe_layouts = ["aa", "aapaper", "aastex", "achemso", "acmsiggraph", "AEA",
                    "agu-dtd", "agums", "agutex", "amsart", "amsbook", "apa",
                    "arab-article", "armenian-article", "article-beamer", "article",
                    "beamer", "book", "broadway", "chess", "cl2emult", "ctex-article",
                    "ctex-book", "ctex-report", "dinbrief", "docbook-book", "docbook-chapter",
                    "docbook", "docbook-section", "doublecol-new", "dtk", "ectaart", "egs",
                    "elsarticle", "elsart", "entcs", "europecv", "extarticle", "extbook",
                    "extletter", "extreport", "foils", "frletter", "g-brief2", "g-brief",
                    "heb-article", "heb-letter", "hollywood", "IEEEtran", "ijmpc", "ijmpd",
                    "iopart", "isprs", "jarticle", "jasatex", "jbook", "jgrga", "jreport",
                    "jsarticle", "jsbeamer", "jsbook", "jss", "kluwer", "latex8", "letter", "lettre",
                    "literate-article", "literate-book", "literate-report", "llncs", "ltugboat",
                    "memoir", "moderncv", "mwart", "mwbk", "mwrep", "paper", "powerdot",
                    "recipebook", "report", "revtex4", "revtex", "scrartcl", "scrarticle-beamer",
                    "scrbook", "scrlettr", "scrlttr2", "scrreprt", "seminar", "siamltex",
                    "sigplanconf", "simplecv", "singlecol", "singlecol-new", "slides", "spie",
                    "svglobal3", "svglobal", "svjog", "svmono", "svmult", "svprobth", "tarticle",
                    "tbook", "treport", "tufte-book", "tufte-handout"]
    # A list of "safe" modules, same as above
    safe_modules = ["biblatex", "beameraddons", "beamersession", "braille", "customHeadersFooters",
                    "endnotes", "enumitem", "eqs-within-sections", "figs-within-sections", "fix-cm",
                    "fixltx2e", "foottoend", "hanging", "jscharstyles", "knitr", "lilypond",
                    "linguistics", "linguisticx", "logicalmkup", "minimalistic", "nomindex", "noweb",
                    "pdfcomment", "sweave", "tabs-within-sections", "theorems-ams-bytype",
                    "theorems-ams-extended-bytype", "theorems-ams-extended", "theorems-ams", "theorems-bytype",
                    "theorems-chap-bytype", "theorems-chap", "theorems-named", "theorems-sec-bytype",
                    "theorems-sec", "theorems-starred", "theorems-std", "todonotes"]
    # Modules we need to take care of
    caveat_modules = ["initials"]
    # information about the relevant styles in caveat_modules (number of opt and req args)
    # use this if we get more caveat_modules. For now, use hard coding (see below).
    # initials = [{'Layout' : 'Initial', 'opt' : 1, 'req' : 1}]

    # Is this a known safe layout?
    safe_layout = document.textclass in safe_layouts
    if not safe_layout:
        document.warning("Lyx2lyx knows nothing about textclass '%s'. "
                         "Please check if short title insets have been converted correctly."
                         % document.textclass)
    # Do we use unsafe or unknown modules
    mods = document.get_module_list()
    unknown_modules = False
    used_caveat_modules = list()
    for mod in mods:
        if mod in safe_modules:
            continue
        if mod in caveat_modules:
            used_caveat_modules.append(mod)
            continue
        unknown_modules = True
        document.warning("Lyx2lyx knows nothing about module '%s'. "
                         "Please check if short title insets have been converted correctly."
                         % mod)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Argument", i)
        if i == -1:
            return

        if not safe_layout or unknown_modules:
            # We cannot do more here since we have no access to this layout.
            # InsetArgument itself will do the real work
            # (see InsetArgument::updateBuffer())
            document.body[i] = "\\begin_inset Argument 999"
            i = i + 1
            continue

        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
        if parent == False:
            document.warning("Malformed lyx document: Can't find parent paragraph layout")
            i = i + 1
            continue
        parbeg = parent[1]
        parend = parent[2]
        allowed_opts = -1
        first_req = -1
        if len(used_caveat_modules) > 0:
            # We know for now that this must be the initials module with the Initial layout
            # If we get more such modules, we need some automating.
            if parent[0] == "Initial":
                # Layout has 1 opt and 1 req arg.
                # Count the actual arguments
                actualargs = 0
                for p in range(parbeg, parend):
                    if document.body[p] == "\\begin_inset Argument":
                        actualargs += 1
                if actualargs == 1:
                    allowed_opts = 0
                    first_req = 2
        # Collect all arguments in this paragraph
        argnr = 0
        for p in range(parbeg, parend):
            if document.body[p] == "\\begin_inset Argument":
                argnr += 1
                if allowed_opts != -1:
                    # We have less arguments than opt + required.
                    # required must take precedence.
                    if argnr > allowed_opts and argnr < first_req:
                        argnr = first_req
                document.body[p] = "\\begin_inset Argument %d" % argnr
        i = i + 1
+
+
def revert_latexargs(document):
    """ Revert InsetArgument to old syntax

    Strips the argument numbers again and, since the old format relies on
    source order, moves all Argument insets of a paragraph to its
    beginning, sorted by their number.
    """
    i = 0
    rx = re.compile(r'^\\begin_inset Argument (\d+)$')
    args = dict()
    while True:
        # Search for Argument insets
        i = find_token(document.body, "\\begin_inset Argument", i)
        if i == -1:
            return
        m = rx.match(document.body[i])
        if not m:
            # No ID: inset already reverted
            i = i + 1
            continue
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
        if parent == False:
            document.warning("Malformed lyx document: Can't find parent paragraph layout")
            i = i + 1
            continue
        parbeg = parent[1]
        parend = parent[2]
        # Collect all arguments in this paragraph
        # realparend compensates for the lines deleted while iterating
        realparend = parend
        for p in range(parbeg, parend):
            m = rx.match(document.body[p])
            if m:
                val = int(m.group(1))
                j = find_end_of_inset(document.body, p)
                # Revert to old syntax
                document.body[p] = "\\begin_inset Argument"
                if j == -1:
                    document.warning("Malformed lyx document: Can't find end of Argument inset")
                    continue
                if val > 0:
                    args[val] = document.body[p : j + 1]
                # Adjust range end
                realparend = realparend - len(document.body[p : j + 1])
                # Remove arg inset at this position
                del document.body[p : j + 1]
            if p >= realparend:
                break
        # Now sort the arg insets
        subst = [""]
        for f in sorted(args):
            subst += args[f]
            del args[f]
        # Insert the sorted arg insets at paragraph begin
        document.body[parbeg + 1:parbeg + 1] = subst

        i = parbeg + 1 + len(subst)
+
+
def revert_Argument_to_TeX_brace(document, line, n, nmax, environment):
    '''
    Reverts an InsetArgument to TeX-code
    usage:
    revert_Argument_to_TeX_brace(document, LineOfBeginLayout, StartArgument, EndArgument, isEnvironment)
    LineOfBeginLayout is the line of the \begin_layout statement
    StartArgument is the number of the first argument that needs to be converted
    EndArgument is the number of the last argument that needs to be converted or the last defined one
    isEnvironment must be True if the layout is for a LaTeX environment
    (environments get "{" ... "}" ERT pairs, commands get a "}{" separator)
    '''
    lineArg = 0
    while lineArg != -1 and n < nmax + 1:
        lineArg = find_token(document.body, "\\begin_inset Argument " + str(n), line)
        if lineArg != -1:
            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", lineArg)
            # we have to assure that no other inset is in the Argument
            beginInset = find_token(document.body, "\\begin_inset", beginPlain)
            endInset = find_token(document.body, "\\end_inset", beginPlain)
            k = beginPlain + 1
            l = k
            # advance past any nested insets until endInset is the Argument's
            # own \end_inset (NOTE(review): assumes begin/end tokens pair up
            # one-to-one within the argument — confirm for deeply nested content)
            while beginInset < endInset and beginInset != -1:
                beginInset = find_token(document.body, "\\begin_inset", k)
                endInset = find_token(document.body, "\\end_inset", l)
                k = beginInset + 1
                l = endInset + 1
            if environment == False:
                # command argument: only the closing "}{" separator is emitted
                document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("}{")
                del(document.body[lineArg : beginPlain + 1])
            else:
                # environment argument: wrap the content in "{" ... "}"
                document.body[endInset - 2 : endInset + 1] = put_cmd_in_ert("}")
                document.body[lineArg : beginPlain + 1] = put_cmd_in_ert("{")
        n = n + 1
+
+
def revert_IEEEtran(document):
    '''
    Reverts InsetArgument of
    Page headings
    Biography
    Biography without photo
    to TeX-code
    '''
    if document.textclass == "IEEEtran":
        # independent search cursors, one per layout; a cursor is parked at -1
        # once its layout no longer occurs
        i = 0
        j = 0
        k = 0
        while True:
            if i != -1:
                i = find_token(document.body, "\\begin_layout Page headings", i)
                if i != -1:
                    revert_Argument_to_TeX_brace(document, i, 1, 1, False)
                    i = i + 1
            if j != -1:
                j = find_token(document.body, "\\begin_layout Biography without photo", j)
                if j != -1:
                    revert_Argument_to_TeX_brace(document, j, 1, 1, True)
                    j = j + 1
            if k != -1:
                k = find_token(document.body, "\\begin_layout Biography", k)
                # NOTE: a "Biography" search also hits "Biography without photo"
                # lines (find_token presumably matches the line prefix — verify
                # against parser_tools); the kA comparison skips those, since
                # they are handled by the j cursor above
                kA = find_token(document.body, "\\begin_layout Biography without photo", k)
                if k == kA and k != -1:
                    k = k + 1
                    continue
                if k != -1:
                    # start with the second argument, therefore 2
                    revert_Argument_to_TeX_brace(document, k, 2, 2, True)
                    k = k + 1
            if i == -1 and j == -1 and k == -1:
                return
+
+
def convert_TeX_brace_to_Argument(document, line, n, nmax, inset, environment):
    '''
    Converts TeX code for mandatory arguments to an InsetArgument
    The conversion of TeX code for optional arguments must be done with another routine
    !!! Be careful if the braces are different in your case as expected here:
    - "}{" separates mandatory arguments of commands
    - "}" + "{" separates mandatory arguments of commands
    - "}" + " " + "{" separates mandatory arguments of commands
    - { and } surround a mandatory argument of an environment
    usage:
    convert_TeX_brace_to_Argument(document, LineOfBeginLayout/Inset, StartArgument, EndArgument, isInset, isEnvironment)
    LineOfBeginLayout/Inset is the line of the \begin_layout or \begin_inset statement
    StartArgument is the number of the first ERT that needs to be converted
    EndArgument is the number of the last ERT that needs to be converted
    isInset must be true, if braces inside an InsetLayout needs to be converted
    isEnvironment must be true, if the layout is for a LaTeX environment

    Todo: this routine can currently handle only one mandatory argument of environments
    '''
    lineERT = line
    endn = line
    loop = 1
    while lineERT != -1 and n < nmax + 1:
        lineERT = find_token(document.body, "\\begin_inset ERT", lineERT)
        if environment == False and lineERT != -1:
            # command case 1: the whole "}{" separator sits in a single ERT
            bracePair = find_token(document.body, "}{", lineERT)
            # assure that the "}{" is in this ERT
            # (offset 5: the brace text presumably sits 5 lines below the
            # \begin_inset ERT line in the standard ERT serialization —
            # TODO confirm against the .lyx file format)
            if bracePair == lineERT + 5:
                end = find_token(document.body, "\\end_inset", bracePair)
                # replace the whole ERT by the close of the previous argument
                document.body[lineERT : end + 1] = ["\\end_layout", "", "\\end_inset"]
                if loop == 1:
                    # in the case that n > 1 we have optional arguments before
                    # therefore detect them if any
                    if n > 1:
                        # first check if there is an argument
                        lineArg = find_token(document.body, "\\begin_inset Argument", line)
                        if lineArg < lineERT and lineArg != -1:
                            # we have an argument, so now search backwards for its end
                            # we must now assure that we don't find other insets like e.g. a newline
                            endInsetArg = lineERT
                            endLayoutArg = endInsetArg
                            # scan backwards until an \end_inset is found exactly
                            # two lines after the matching \end_layout
                            while endInsetArg != endLayoutArg + 2 and endInsetArg != -1:
                                endInsetArg = endInsetArg - 1
                                endLayoutArg = endInsetArg
                                endInsetArg = find_token_backwards(document.body, "\\end_inset", endInsetArg)
                                endLayoutArg = find_token_backwards(document.body, "\\end_layout", endLayoutArg)
                            line = endInsetArg + 1
                    if inset == False:
                        document.body[line + 1 : line + 1] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                    else:
                        document.body[line + 4 : line + 4] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                else:
                    # subsequent arguments open where the previous one ended
                    document.body[endn : endn] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                n = n + 1
                endn = end
                loop = loop + 1
            # now check the case that we have "}" + "{" in two ERTs
            else:
                endBrace = find_token(document.body, "}", lineERT)
                if endBrace == lineERT + 5:
                    beginBrace = find_token(document.body, "{", endBrace)
                    # assure that the ERTs are consecutive (11 or 12 depending if there is a space between the ERTs or not)
                    if beginBrace == endBrace + 11 or beginBrace == endBrace + 12:
                        end = find_token(document.body, "\\end_inset", beginBrace)
                        document.body[lineERT : end + 1] = ["\\end_layout", "", "\\end_inset"]
                        if loop == 1:
                            # in the case that n > 1 we have optional arguments before
                            # therefore detect them if any
                            if n > 1:
                                # first check if there is an argument
                                lineArg = find_token(document.body, "\\begin_inset Argument", line)
                                if lineArg < lineERT and lineArg != -1:
                                    # we have an argument, so now search backwards for its end
                                    # we must now assure that we don't find other insets like e.g. a newline
                                    endInsetArg = lineERT
                                    endLayoutArg = endInsetArg
                                    while endInsetArg != endLayoutArg + 2 and endInsetArg != -1:
                                        endInsetArg = endInsetArg - 1
                                        endLayoutArg = endInsetArg
                                        endInsetArg = find_token_backwards(document.body, "\\end_inset", endInsetArg)
                                        endLayoutArg = find_token_backwards(document.body, "\\end_layout", endLayoutArg)
                                    line = endInsetArg + 1
                            if inset == False:
                                document.body[line + 1 : line + 1] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                            else:
                                document.body[line + 4 : line + 4] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                        else:
                            document.body[endn : endn] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                        n = n + 1
                        loop = loop + 1
                        # set the line where the next argument will be inserted
                        if beginBrace == endBrace + 11:
                            endn = end - 11
                        else:
                            endn = end - 12
                    # NOTE(review): if the two ERTs are not consecutive, lineERT
                    # is not advanced here — this branch looks like it could loop
                    # forever on such input; confirm whether that can occur
                else:
                    lineERT = lineERT + 1
        if environment == True and lineERT != -1:
            # environment case: a lone "{" ERT opens the argument, a lone "}"
            # ERT somewhere later closes it
            opening = find_token(document.body, "{", lineERT)
            if opening == lineERT + 5: # assure that the "{" is in this ERT
                end = find_token(document.body, "\\end_inset", opening)
                document.body[lineERT : end + 1] = ["\\begin_inset Argument " + str(n), "status open", "", "\\begin_layout Plain Layout"]
                n = n + 1
                lineERT2 = find_token(document.body, "\\begin_inset ERT", lineERT)
                closing = find_token(document.body, "}", lineERT2)
                if closing == lineERT2 + 5: # assure that the "}" is in this ERT
                    end2 = find_token(document.body, "\\end_inset", closing)
                    document.body[lineERT2 : end2 + 1] = ["\\end_layout", "", "\\end_inset"]
            else:
                lineERT = lineERT + 1
+
+
def convert_IEEEtran(document):
    '''
    Converts ERT of
    Page headings
    Biography
    Biography without photo
    to InsetArgument
    '''
    if document.textclass == "IEEEtran":
        # independent search cursors, one per layout; a cursor is parked at -1
        # once its layout no longer occurs
        i = 0
        j = 0
        k = 0
        while True:
            if i != -1:
                i = find_token(document.body, "\\begin_layout Page headings", i)
                if i != -1:
                    convert_TeX_brace_to_Argument(document, i, 1, 1, False, False)
                    i = i + 1
            if j != -1:
                j = find_token(document.body, "\\begin_layout Biography without photo", j)
                if j != -1:
                    convert_TeX_brace_to_Argument(document, j, 1, 1, False, True)
                    j = j + 1
            if k != -1:
                # assure that we don't handle Biography Biography without photo
                # (a "Biography" search presumably also prefix-matches
                # "Biography without photo" lines — those belong to cursor j)
                k = find_token(document.body, "\\begin_layout Biography", k)
                kA = find_token(document.body, "\\begin_layout Biography without photo", k - 1)
                if k == kA and k != -1:
                    k = k + 1
                    continue
                if k != -1:
                    # the argument we want to convert is the second one
                    convert_TeX_brace_to_Argument(document, k, 2, 2, False, True)
                    k = k + 1
            if i == -1 and j == -1 and k == -1:
                return
+
+
def revert_AASTeX(document):
    " Reverts InsetArgument of Altaffilation to TeX-code "
    if document.textclass != "aastex":
        return
    # scan the body for every Altaffilation layout and revert its first argument
    pos = 0
    while pos != -1:
        pos = find_token(document.body, "\\begin_layout Altaffilation", pos)
        if pos == -1:
            return
        revert_Argument_to_TeX_brace(document, pos, 1, 1, False)
        pos += 1
+
+
def convert_AASTeX(document):
    " Converts ERT of Altaffilation to InsetArgument "
    if document.textclass != "aastex":
        return
    # scan the body for every Altaffilation layout and convert its first argument
    pos = 0
    while pos != -1:
        pos = find_token(document.body, "\\begin_layout Altaffilation", pos)
        if pos == -1:
            return
        convert_TeX_brace_to_Argument(document, pos, 1, 1, False, False)
        pos += 1
+
+
def revert_AGUTeX(document):
    " Reverts InsetArgument of Author affiliation to TeX-code "
    if document.textclass != "agutex":
        return
    # scan the body for every Author affiliation layout and revert its argument
    pos = 0
    while pos != -1:
        pos = find_token(document.body, "\\begin_layout Author affiliation", pos)
        if pos == -1:
            return
        revert_Argument_to_TeX_brace(document, pos, 1, 1, False)
        pos += 1
+
+
def convert_AGUTeX(document):
    " Converts ERT of Author affiliation to InsetArgument "
    if document.textclass != "agutex":
        return
    # scan the body for every Author affiliation layout and convert its argument
    pos = 0
    while pos != -1:
        pos = find_token(document.body, "\\begin_layout Author affiliation", pos)
        if pos == -1:
            return
        convert_TeX_brace_to_Argument(document, pos, 1, 1, False, False)
        pos += 1
+
+
def revert_IJMP(document):
    " Reverts InsetArgument of MarkBoth to TeX-code "
    if document.textclass not in ("ijmpc", "ijmpd"):
        return
    # scan the body for every MarkBoth layout and revert its argument
    pos = 0
    while pos != -1:
        pos = find_token(document.body, "\\begin_layout MarkBoth", pos)
        if pos == -1:
            return
        revert_Argument_to_TeX_brace(document, pos, 1, 1, False)
        pos += 1
+
+
def convert_IJMP(document):
    " Converts ERT of MarkBoth to InsetArgument "
    if document.textclass not in ("ijmpc", "ijmpd"):
        return
    # scan the body for every MarkBoth layout and convert its argument
    pos = 0
    while pos != -1:
        pos = find_token(document.body, "\\begin_layout MarkBoth", pos)
        if pos == -1:
            return
        convert_TeX_brace_to_Argument(document, pos, 1, 1, False, False)
        pos += 1
+
+
def revert_SIGPLAN(document):
    " Reverts InsetArguments of the sigplanconf layouts Conference and Author to TeX-code "
    if document.textclass != "sigplanconf":
        return
    # one independent cursor per layout; a cursor is parked at -1 once its
    # layout no longer occurs
    conf_pos = 0
    auth_pos = 0
    while conf_pos != -1 or auth_pos != -1:
        if conf_pos != -1:
            conf_pos = find_token(document.body, "\\begin_layout Conference", conf_pos)
            if conf_pos != -1:
                revert_Argument_to_TeX_brace(document, conf_pos, 1, 1, False)
                conf_pos += 1
        if auth_pos != -1:
            auth_pos = find_token(document.body, "\\begin_layout Author", auth_pos)
            if auth_pos != -1:
                revert_Argument_to_TeX_brace(document, auth_pos, 1, 2, False)
                auth_pos += 1
+
+
def convert_SIGPLAN(document):
    " Converts ERT of the sigplanconf layouts Conference and Author to InsetArgument "
    if document.textclass != "sigplanconf":
        return
    # one independent cursor per layout; a cursor is parked at -1 once its
    # layout no longer occurs
    conf_pos = 0
    auth_pos = 0
    while conf_pos != -1 or auth_pos != -1:
        if conf_pos != -1:
            conf_pos = find_token(document.body, "\\begin_layout Conference", conf_pos)
            if conf_pos != -1:
                convert_TeX_brace_to_Argument(document, conf_pos, 1, 1, False, False)
                conf_pos += 1
        if auth_pos != -1:
            auth_pos = find_token(document.body, "\\begin_layout Author", auth_pos)
            if auth_pos != -1:
                convert_TeX_brace_to_Argument(document, auth_pos, 1, 2, False, False)
                auth_pos += 1
+
+
def revert_SIGGRAPH(document):
    " Reverts InsetArgument of Flex CRcat to TeX-code "
    if document.textclass != "acmsiggraph":
        return
    # scan the body for every Flex CRcat inset and revert its arguments 1-3
    pos = 0
    while pos != -1:
        pos = find_token(document.body, "\\begin_inset Flex CRcat", pos)
        if pos == -1:
            return
        revert_Argument_to_TeX_brace(document, pos, 1, 3, False)
        pos += 1
+
+
def convert_SIGGRAPH(document):
    " Converts ERT of Flex CRcat to InsetArgument "
    if document.textclass != "acmsiggraph":
        return
    # scan the body for every Flex CRcat inset and convert its arguments 1-3
    # (isInset=True: the braces live inside an InsetLayout)
    pos = 0
    while pos != -1:
        pos = find_token(document.body, "\\begin_inset Flex CRcat", pos)
        if pos == -1:
            return
        convert_TeX_brace_to_Argument(document, pos, 1, 3, True, False)
        pos += 1
+
+
def revert_EuropeCV(document):
    " Reverts InsetArguments of the europecv layouts Item, BulletedItem, Language and LastLanguage to TeX-code "
    if document.textclass != "europecv":
        return
    # one independent cursor per layout; a cursor is parked at -1 once its
    # layout no longer occurs
    itm = 0
    blt = 0
    lng = 0
    lst = 0
    while itm != -1 or blt != -1 or lng != -1 or lst != -1:
        if itm != -1:
            itm = find_token(document.body, "\\begin_layout Item", itm)
            if itm != -1:
                revert_Argument_to_TeX_brace(document, itm, 2, 2, False)
                itm += 1
        if blt != -1:
            blt = find_token(document.body, "\\begin_layout BulletedItem", blt)
            if blt != -1:
                revert_Argument_to_TeX_brace(document, blt, 2, 2, False)
                blt += 1
        if lng != -1:
            lng = find_token(document.body, "\\begin_layout Language", lng)
            if lng != -1:
                revert_Argument_to_TeX_brace(document, lng, 2, 6, False)
                lng += 1
        if lst != -1:
            lst = find_token(document.body, "\\begin_layout LastLanguage", lst)
            if lst != -1:
                revert_Argument_to_TeX_brace(document, lst, 2, 6, False)
                lst += 1
+
+
def convert_EuropeCV(document):
    " Converts ERT of the europecv layouts Item, BulletedItem, Language and LastLanguage to InsetArgument "
    if document.textclass != "europecv":
        return
    # one independent cursor per layout; a cursor is parked at -1 once its
    # layout no longer occurs
    itm = 0
    blt = 0
    lng = 0
    lst = 0
    while itm != -1 or blt != -1 or lng != -1 or lst != -1:
        if itm != -1:
            itm = find_token(document.body, "\\begin_layout Item", itm)
            if itm != -1:
                convert_TeX_brace_to_Argument(document, itm, 2, 2, False, False)
                itm += 1
        if blt != -1:
            blt = find_token(document.body, "\\begin_layout BulletedItem", blt)
            if blt != -1:
                convert_TeX_brace_to_Argument(document, blt, 2, 2, False, False)
                blt += 1
        if lng != -1:
            lng = find_token(document.body, "\\begin_layout Language", lng)
            if lng != -1:
                convert_TeX_brace_to_Argument(document, lng, 2, 6, False, False)
                lng += 1
        if lst != -1:
            lst = find_token(document.body, "\\begin_layout LastLanguage", lst)
            if lst != -1:
                convert_TeX_brace_to_Argument(document, lst, 2, 6, False, False)
                lst += 1
+
+
def revert_literate(document):
    " Revert Literate document to old format "
    # only documents that actually carry the noweb module are affected;
    # del_token removes the module line and reports whether it was present
    if not del_token(document.header, "noweb", 0):
        return
    document.textclass = "literate-" + document.textclass
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_layout Chunk", pos)
        if pos == -1:
            break
        # the new Chunk layout was called Scrap in the old literate classes
        document.body[pos] = "\\begin_layout Scrap"
        pos += 1
+
+
def convert_literate(document):
    " Convert Literate document to new format"
    pos = find_token(document.header, "\\textclass", 0)
    if pos == -1 or "literate-" not in document.header[pos]:
        return
    # strip the literate- prefix: the noweb module now carries that role
    document.textclass = document.header[pos].replace("\\textclass literate-", "")
    modules = find_token(document.header, "\\begin_modules", 0)
    if modules != -1:
        # a modules block already exists: append noweb to it
        document.header.insert(modules + 1, "noweb")
    else:
        # no modules block yet: create one right after \textclass
        document.header[pos + 1 : pos + 1] = ["\\begin_modules", "noweb", "\\end_modules"]
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_layout Scrap", pos)
        if pos == -1:
            break
        # the old Scrap layout is called Chunk in the new scheme
        document.body[pos] = "\\begin_layout Chunk"
        pos += 1
+
+
def revert_itemargs(document):
    " Reverts \\item arguments to TeX-code "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Argument item:", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
        if parent == False:
            document.warning("Malformed lyx document: Can't find parent paragraph layout")
            i = i + 1
            continue
        parbeg = parent[1]
        # the inset content is the body of its Plain Layout paragraph
        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
        endPlain = find_end_of_layout(document.body, beginPlain)
        content = document.body[beginPlain + 1 : endPlain]
        # remove the whole argument inset first, then re-insert the content
        # as "[content]" ERT at the start of the containing paragraph;
        # parbeg < i, so the insertion index is still valid after the delete
        del document.body[i:j+1]
        subst = put_cmd_in_ert("[") + content + put_cmd_in_ert("]")
        document.body[parbeg + 1:parbeg + 1] = subst
        i = i + 1
+
+
def revert_garamondx_newtxmath(document):
    " Revert native garamond newtxmath definition to LaTeX "

    i = find_token(document.header, "\\font_math", 0)
    if i == -1:
        return
    # only relevant when the document compiles with TeX fonts
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        val = get_value(document.header, "\\font_math", i)
        if val == "garamondx-ntxm":
            # pass the preamble line as a list, consistent with the sibling
            # revert_garamondx, which calls add_to_preamble with [preamble]
            add_to_preamble(document, ["\\usepackage[garamondx]{newtxmath}"])
            document.header[i] = "\\font_math auto"
+
+
def revert_garamondx(document):
    " Revert native garamond font definition to LaTeX "

    # only relevant when the document compiles with TeX fonts
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    i = find_token(document.header, "\\font_roman garamondx", 0)
    if i == -1:
        return
    # honor the old-style-figures setting via the package's osfI option
    osf = find_token(document.header, "\\font_osf true", 0) != -1
    if osf:
        preamble = "\\usepackage[osfI]{garamondx}"
    else:
        preamble = "\\usepackage{garamondx}"
    add_to_preamble(document, [preamble])
    document.header[i] = "\\font_roman default"
+
+
##
# Conversion hub
#
[422, [convert_use_packages]],
[423, [convert_use_mathtools]],
[424, [convert_cite_engine_type]],
+ [425, []],
+ [426, []],
+ [427, []],
+ [428, [convert_cell_rotation]],
+ [429, [convert_table_rotation]],
+ [430, [convert_listoflistings]],
+ [431, [convert_use_amssymb]],
+ [432, []],
+ [433, [convert_armenian]],
+ [434, []],
+ [435, []],
+ [436, []],
+ [437, []],
+ [438, []],
+ [439, []],
+ [440, []],
+ [441, [convert_mdnomath]],
+ [442, []],
+ [443, []],
+ [444, []],
+ [445, []],
+ [446, [convert_latexargs]],
+ [447, [convert_IEEEtran, convert_AASTeX, convert_AGUTeX, convert_IJMP, convert_SIGPLAN, convert_SIGGRAPH, convert_EuropeCV]],
+ [448, [convert_literate]],
+ [449, []],
+ [450, []]
]
revert = [
+ [449, [revert_garamondx, revert_garamondx_newtxmath]],
+ [448, [revert_itemargs]],
+ [447, [revert_literate]],
+ [446, [revert_IEEEtran, revert_AASTeX, revert_AGUTeX, revert_IJMP, revert_SIGPLAN, revert_SIGGRAPH, revert_EuropeCV]],
+ [445, [revert_latexargs]],
+ [444, [revert_uop]],
+ [443, [revert_biolinum]],
+ [442, []],
+ [441, [revert_newtxmath]],
+ [440, [revert_mdnomath]],
+ [439, [revert_mathfonts]],
+ [438, [revert_minionpro]],
+ [437, [revert_ipadeco, revert_ipachar]],
+ [436, [revert_texgyre]],
+ [435, [revert_mathdesign]],
+ [434, [revert_txtt]],
+ [433, [revert_libertine]],
+ [432, [revert_armenian]],
+ [431, [revert_languages, revert_ancientgreek]],
+ [430, [revert_use_amssymb]],
+ [429, [revert_listoflistings]],
+ [428, [revert_table_rotation]],
+ [427, [revert_cell_rotation]],
+ [426, [revert_tipa]],
+ [425, [revert_verbatim]],
+ [424, [revert_cancel]],
[423, [revert_cite_engine_type]],
[422, [revert_use_mathtools]],
[421, [revert_use_packages]],
[418, [revert_australian]],
[417, [revert_justification]],
[416, [revert_japanese_encodings]],
- [415, [revert_negative_space,revert_math_spaces]],
+ [415, [revert_negative_space, revert_math_spaces]],
[414, [revert_undertilde]],
[413, [revert_visible_space]]
]