# Uncomment only what you need to import, please.
-from parser_tools import find_end_of, find_token_backwards, find_end_of_layout#,
-# find_token, find_tokens, \
-# find_token_exact, find_end_of_inset, \
-# is_in_inset, get_value, get_quoted_value, \
-# del_token, check_token, get_option_value, get_bool_value
-
-from parser_tools import find_token, find_end_of_inset, get_value, \
- get_bool_value, get_containing_layout
+from parser_tools import find_end_of, find_token_backwards, find_end_of_layout, \
+ find_token, find_end_of_inset, get_value, get_bool_value, \
+ get_containing_layout, get_quoted_value, del_token
+# find_tokens, find_token_exact, is_in_inset, \
+# check_token, get_option_value
from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert
# get_ert, lyx2latex, \
# insert_to_preamble, latex_length, revert_flex_inset, \
# revert_font_attrs, hex2ratio, str2bool
-from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert
-
####################################################################
# Private helper functions
if document.textclass == "scrarticle-beamer":
inclusion = "scrartcl.layout"
- while True:
- i = find_token(document.header, "\\begin_local_layout", 0)
- if i == -1:
- k = find_token(document.header, "\\language", 0)
- if k == -1:
- # this should not happen
- document.warning("Malformed LyX document! No \\language header found!")
- break
- document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
- i = find_token(document.header, "\\begin_local_layout", 0)
- if i != -1:
- j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
- if j == -1:
- # this should not happen
- break
+ i = find_token(document.header, "\\begin_local_layout", 0)
+ if i == -1:
+ k = find_token(document.header, "\\language", 0)
+ if k == -1:
+ # this should not happen
+ document.warning("Malformed LyX document! No \\language header found!")
+ return
+ document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
+ i = k - 1
- document.header[i+1 : i+1] = ["### Inserted by lyx2lyx (more [scr]article styles) ###",
- "Input " + inclusion,
- "Input beamer.layout",
- "Provides geometry 0",
- "Provides hyperref 0",
- "DefaultFont",
- " Family Roman",
- " Series Medium",
- " Shape Up",
- " Size Normal",
- " Color None",
- "EndFont",
- "Preamble",
- " \\usepackage{beamerarticle,pgf}",
- " % this default might be overridden by plain title style",
- " \\newcommand\makebeamertitle{\\frame{\\maketitle}}%",
- " \\AtBeginDocument{",
- " \\let\\origtableofcontents=\\tableofcontents",
- " \\def\\tableofcontents{\\@ifnextchar[{\\origtableofcontents}{\\gobbletableofcontents}}",
- " \\def\\gobbletableofcontents#1{\\origtableofcontents}",
- " }",
- "EndPreamble",
- "### End of insertion by lyx2lyx (more [scr]article styles) ###"]
+ j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
+ if j == -1:
+ # this should not happen
+ document.warning("Malformed LyX document: Can't find end of local layout!")
return
+ document.header[i+1 : i+1] = [
+ "### Inserted by lyx2lyx (more [scr]article styles) ###",
+ "Input " + inclusion,
+ "Input beamer.layout",
+ "Provides geometry 0",
+ "Provides hyperref 0",
+ "DefaultFont",
+ " Family Roman",
+ " Series Medium",
+ " Shape Up",
+ " Size Normal",
+ " Color None",
+ "EndFont",
+ "Preamble",
+ " \\usepackage{beamerarticle,pgf}",
+ " % this default might be overridden by plain title style",
+ " \\newcommand\makebeamertitle{\\frame{\\maketitle}}%",
+ " \\AtBeginDocument{",
+ " \\let\\origtableofcontents=\\tableofcontents",
+ " \\def\\tableofcontents{\\@ifnextchar[{\\origtableofcontents}{\\gobbletableofcontents}}",
+ " \\def\\gobbletableofcontents#1{\\origtableofcontents}",
+ " }",
+ "EndPreamble",
+ "### End of insertion by lyx2lyx (more [scr]article styles) ###"
+ ]
+
def convert_beamer_article_styles(document):
" Remove included (scr)article styles in beamer article "
if document.textclass not in beamer_articles:
return
- while True:
- i = find_token(document.header, "\\begin_local_layout", 0)
- if i == -1:
- return
+ i = find_token(document.header, "\\begin_local_layout", 0)
+ if i == -1:
+ return
- j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
- if j == -1:
- # this should not happen
- break
+ j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
+ if j == -1:
+ # this should not happen
+ document.warning("Malformed LyX document: Can't find end of local layout!")
+ return
- k = find_token(document.header, "### Inserted by lyx2lyx (more [scr]article styles) ###", i, j)
- if k != -1:
- l = find_token(document.header, "### End of insertion by lyx2lyx (more [scr]article styles) ###", i, j)
- if l == -1:
- # this should not happen
- document.warning("End of lyx2lyx local layout insertion not found!")
- break
+ k = find_token(document.header, "### Inserted by lyx2lyx (more [scr]article styles) ###", i, j)
+ if k != -1:
+ l = find_token(document.header, "### End of insertion by lyx2lyx (more [scr]article styles) ###", i, j)
+ if l == -1:
+ # this should not happen
+ document.warning("End of lyx2lyx local layout insertion not found!")
+ return
+ if k == i + 1 and l == j - 1:
+ # that was all the local layout there was
+ document.header[i : j + 1] = []
+ else:
document.header[k : l + 1] = []
- return
-
def revert_bosnian(document):
"Set the document language to English but assure Bosnian output"
continue
# Now handle Hebrew
+ if not document.language == "hebrew" and find_token(document.body, '\\lang hebrew', 0) == -1:
+ return
+
i = 0
j = 0
while True:
i = l
def revert_iopart(document):
    " Input new styles via local layout "
    # Nothing to do for other classes.
    if document.textclass != "iopart":
        return

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        # No local layout block yet: create an empty one just before
        # the \language header line, then point i at its opening tag.
        k = find_token(document.header, "\\language", 0)
        if k == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
        i = k-1

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    # Inject the standard layouts, delimited by marker comments so that
    # convert_iopart can later remove exactly this insertion again.
    document.header[i+1 : i+1] = [
        "### Inserted by lyx2lyx (stdlayouts) ###",
        "Input stdlayouts.inc",
        "### End of insertion by lyx2lyx (stdlayouts) ###"
    ]
+
def convert_iopart(document):
    " Remove local layout we added, if it is there "
    if document.textclass != "iopart":
        return

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        # no local layout block at all, so nothing we could have inserted
        return

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    # Look for the marker lines written by revert_iopart.
    k = find_token(document.header, "### Inserted by lyx2lyx (stdlayouts) ###", i, j)
    if k != -1:
        l = find_token(document.header, "### End of insertion by lyx2lyx (stdlayouts) ###", i, j)
        if l == -1:
            # this should not happen
            document.warning("End of lyx2lyx local layout insertion not found!")
            return
        if k == i + 1 and l == j - 1:
            # that was all the local layout there was: drop the whole block
            document.header[i : j + 1] = []
        else:
            # remove only our insertion, keep the user's local layout
            document.header[k : l + 1] = []
+
+
def convert_quotestyle(document):
    """Rename the \\quotes_language header entry to \\quotes_style."""
    pos = find_token(document.header, "\\quotes_language", 0)
    if pos != -1:
        style = get_value(document.header, "\\quotes_language", pos)
        document.header[pos] = "\\quotes_style " + style
        return
    document.warning("Malformed LyX document! Can't find \\quotes_language!")
+
+
def revert_quotestyle(document):
    """Rename the \\quotes_style header entry back to \\quotes_language."""
    pos = find_token(document.header, "\\quotes_style", 0)
    if pos != -1:
        style = get_value(document.header, "\\quotes_style", pos)
        document.header[pos] = "\\quotes_language " + style
        return
    document.warning("Malformed LyX document! Can't find \\quotes_style!")
+
+
def revert_plainquote(document):
    """Revert plain quote insets.

    Resets a global "plain" quote style to english and replaces every
    plain quote inset by the corresponding literal character.
    """
    # First, revert the header style setting
    i = find_token(document.header, "\\quotes_style plain", 0)
    if i != -1:
        document.header[i] = "\\quotes_style english"

    # now the insets
    i = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes q', i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            # Skip past the broken inset: restarting the search at k
            # would find the same token again and loop forever.
            i = k + 1
            continue
        # A spec ending in "s" is the single-quote variant; everything
        # else gets the plain double quote.
        replace = "\""
        if document.body[k].endswith("s"):
            replace = "'"
        document.body[k:l+1] = [replace]
        i = l
+
+
def convert_frenchquotes(document):
    """Convert french quote insets to swiss."""
    # Adjust the global style setting first
    header_pos = find_token(document.header, "\\quotes_style french", 0)
    if header_pos != -1:
        document.header[header_pos] = "\\quotes_style swiss"

    # then rewrite every french quote inset in the body
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes f', pos)
        if pos == -1:
            return
        # spec is the three-letter quote code after "Quotes "
        spec = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        document.body[pos] = document.body[pos].replace(spec, spec.replace("f", "c", 1))
        pos += 1
+
+
def revert_swissquotes(document):
    """Revert swiss quote insets to french."""
    # Adjust the global style setting first
    header_pos = find_token(document.header, "\\quotes_style swiss", 0)
    if header_pos != -1:
        document.header[header_pos] = "\\quotes_style french"

    # then rewrite every swiss quote inset in the body
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes c', pos)
        if pos == -1:
            return
        # spec is the three-letter quote code after "Quotes "
        spec = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        document.body[pos] = document.body[pos].replace(spec, spec.replace("c", "f", 1))
        pos += 1
+
+
def revert_britishquotes(document):
    " Revert british quote insets to english "
    # First, revert style setting
    i = find_token(document.header, "\\quotes_style british", 0)
    if i != -1:
        document.header[i] = "\\quotes_style english"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes b', i)
        if i == -1:
            return
        # val is the three-letter quote code after "Quotes ":
        # style, side (l/r), level (d/s)
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newval = val.replace("b", "e", 1)
        # british nests quotes the opposite way round to english,
        # so swap the single/double level when reverting
        if val[2] == "d":
            # "d" level becomes "s" in english style
            newval = newval.replace("d", "s")
        else:
            # "s" level becomes "d" in english style
            newval = newval.replace("s", "d")
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
+
+
def revert_swedishgquotes(document):
    " Revert swedish quote insets "
    # First, revert style setting
    i = find_token(document.header, "\\quotes_style swedishg", 0)
    if i != -1:
        document.header[i] = "\\quotes_style danish"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes w', i)
        if i == -1:
            return
        # val is the three-letter quote code after "Quotes ":
        # style, side (l/r), level (d/s)
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        if val[2] == "d":
            # outer marks: map to danish style, flipping the side ("r" -> "l")
            newval = val.replace("w", "a", 1).replace("r", "l")
        else:
            # inner marks: map to swedish style
            newval = val.replace("w", "s", 1)
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
+
+
def revert_frenchquotes(document):
    """Revert french inner quote insets.

    Only the inner (single-level, spec ending in "s") french marks are
    rewritten to english ones; outer french marks are left untouched.
    """
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes f', i)
        if i == -1:
            return
        # val is the three-letter quote code: style, side (l/r), level (d/s)
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        if val[2] == "s":
            # inner marks
            newval = val.replace("f", "e", 1).replace("s", "d")
            # The rewrite must stay inside this guard: doing it
            # unconditionally would use an undefined (first iteration)
            # or stale (later iterations) newval for outer marks.
            document.body[i] = document.body[i].replace(val, newval)
        i += 1
+
+
def revert_frenchinquotes(document):
    " Revert inner frenchin quote insets "
    # First, revert style setting
    i = find_token(document.header, "\\quotes_style frenchin", 0)
    if i != -1:
        document.header[i] = "\\quotes_style french"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes i', i)
        if i == -1:
            return
        # val is the three-letter quote code after "Quotes ":
        # style, side (l/r), level (d/s)
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newval = val.replace("i", "f", 1)
        if val[2] == "s":
            # inner marks additionally switch to the double level
            newval = newval.replace("s", "d")
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
+
+
def revert_russianquotes(document):
    " Revert russian quote insets "
    # First, revert style setting
    i = find_token(document.header, "\\quotes_style russian", 0)
    if i != -1:
        document.header[i] = "\\quotes_style french"

    # now the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes r', i)
        if i == -1:
            return
        # val is the three-letter quote code after "Quotes ":
        # style, side (l/r), level (d/s)
        val = get_value(document.body, "\\begin_inset Quotes", i)[7:]
        newval = val
        if val[2] == "s":
            # inner marks: german style, double level
            newval = val.replace("r", "g", 1).replace("s", "d")
        else:
            # outer marks: french style
            newval = val.replace("r", "f", 1)
        document.body[i] = document.body[i].replace(val, newval)
        i += 1
+
+
def revert_dynamicquotes(document):
    """Revert dynamic quote insets.

    Drops the \\dynamic_quotes header line and resolves every dynamic
    quote inset ("Quotes x...") to the document's global quote style.
    """
    # First, revert header
    i = find_token(document.header, "\\dynamic_quotes", 0)
    if i != -1:
        del document.header[i]

    # Get global style
    style = "english"
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\quotes_style")
    else:
        style = get_value(document.header, "\\quotes_style", i)

    # Map the style name to its one-letter inset code; unknown styles
    # fall back to english ("e"), as the original elif chain did.
    s = {
        "english":  "e",
        "swedish":  "s",
        "german":   "g",
        "polish":   "p",
        "swiss":    "c",
        "danish":   "a",
        "plain":    "q",
        "british":  "b",
        "swedishg": "w",
        "french":   "f",
        "frenchin": "i",
        "russian":  "r",
    }.get(style, "e")

    # now transform the insets
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Quotes x', i)
        if i == -1:
            return
        document.body[i] = document.body[i].replace("x", s)
        i += 1
+
+
def _revert_cjk_quote_insets(document, cjk_langs, qtype, marks):
    """Replace every "Quotes <qtype>" inset in the body.

    marks maps (level, side) — level "s" (inner) or "d" (outer), side
    "l" (opening) or "r" (closing) — to a pair (cjk_char, formula):
    the literal character used in CJK context and the list of
    Formula-inset lines used elsewhere.
    """
    i = 0
    while True:
        k = find_token(document.body, '\\begin_inset Quotes ' + qtype, i)
        if k == -1:
            return
        l = find_end_of_inset(document.body, k)
        if l == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(k))
            # skip the broken inset; retrying at k would loop forever
            i = k + 1
            continue
        # Is the inset in CJK context? Use the nearest preceding \lang
        # inside the containing layout, falling back to the document
        # language if the paragraph has no explicit \lang.
        cjk = False
        parent = get_containing_layout(document.body, k)
        ql = find_token_backwards(document.body, "\\lang", k)
        if ql == -1 or ql < parent[1]:
            cjk = document.language in cjk_langs
        elif document.body[ql].split()[1] in cjk_langs:
            cjk = True
        # Read the quote spec from the inset line itself (position k,
        # not the loop's search start i, which could hit an unrelated
        # earlier Quotes inset).
        val = get_value(document.body, "\\begin_inset Quotes", k)[7:]
        level = "s" if val[2] == "s" else "d"
        side = "l" if val[1] == "l" else "r"
        cjk_char, formula = marks[(level, side)]
        document.body[k:l+1] = [cjk_char] if cjk else formula
        i = l


def revert_cjkquotes(document):
    """Revert cjk quote insets.

    A global cjk quote style is reset to english (dynamic insets are
    pinned to the cjk code first); then all corner-bracket ("j") and
    angle-bracket ("k") quote insets are replaced by literal CJK
    characters or math-mode approximations.
    """
    # Get global style
    style = "english"
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\quotes_style")
    else:
        style = get_value(document.header, "\\quotes_style", i)

    global_cjk = style.find("cjk") != -1

    if global_cjk:
        document.header[i] = "\\quotes_style english"
        # transform dynamic insets to the concrete cjk code
        s = "j"
        if style == "cjkangle":
            s = "k"
        i = 0
        while True:
            i = find_token(document.body, '\\begin_inset Quotes x', i)
            if i == -1:
                break
            document.body[i] = document.body[i].replace("x", s)
            i += 1

    cjk_langs = ["chinese-simplified", "chinese-traditional", "japanese", "japanese-cjk", "korean"]

    # corner-bracket quotes ("Quotes j")
    _revert_cjk_quote_insets(document, cjk_langs, "j", {
        ("s", "l"): (u"\u300E", ["\\begin_inset Formula $\\llceil$", "\\end_inset"]),
        ("s", "r"): (u"\u300F", ["\\begin_inset Formula $\\rrfloor$", "\\end_inset"]),
        ("d", "l"): (u"\u300C", ["\\begin_inset Formula $\\lceil$", "\\end_inset"]),
        ("d", "r"): (u"\u300D", ["\\begin_inset Formula $\\rfloor$", "\\end_inset"]),
    })

    # angle-bracket quotes ("Quotes k"); note the outer opening formula
    # previously ended in a stray "$$" — fixed to a single "$"
    _revert_cjk_quote_insets(document, cjk_langs, "k", {
        ("s", "l"): (u"\u3008", ["\\begin_inset Formula $\\langle$", "\\end_inset"]),
        ("s", "r"): (u"\u3009", ["\\begin_inset Formula $\\rangle$", "\\end_inset"]),
        ("d", "l"): (u"\u300A", ["\\begin_inset Formula $\\langle\\kern -2.5pt\\langle$", "\\end_inset"]),
        ("d", "r"): (u"\u300B", ["\\begin_inset Formula $\\rangle\\kern -2.5pt\\rangle$", "\\end_inset"]),
    })
+
+
def revert_crimson(document):
    " Revert native Cochineal/Crimson font definition to LaTeX "
    # Only relevant when compiling with TeX fonts
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        preamble = ""
        i = find_token(document.header, "\\font_roman \"cochineal\"", 0)
        if i != -1:
            # load the package by hand, with osf option if set
            osf = False
            j = find_token(document.header, "\\font_osf true", 0)
            if j != -1:
                osf = True
            preamble = "\\usepackage"
            if osf:
                document.header[j] = "\\font_osf false"
                preamble += "[proportional,osf]"
            preamble += "{cochineal}"
            add_to_preamble(document, [preamble])
            # reset the header entry to the default roman font
            document.header[i] = document.header[i].replace("cochineal", "default")
+
+
def revert_cochinealmath(document):
    """Revert cochineal newtxmath definitions to LaTeX.

    Loads newtxmath with the cochineal option via the preamble and
    resets the \\font_math header entry to auto.
    """
    # Only relevant when compiling with TeX fonts
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        i = find_token(document.header, "\\font_math \"cochineal-ntxm\"", 0)
        if i != -1:
            # pass a list, for consistency with revert_crimson's
            # add_to_preamble call
            add_to_preamble(document, ["\\usepackage[cochineal]{newtxmath}"])
            document.header[i] = document.header[i].replace("cochineal-ntxm", "auto")
+
+
def revert_labelonly(document):
    " Revert labelonly tag for InsetRef "
    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue
        # only labelonly references need reverting
        k = find_token(document.body, "LatexCommand labelonly", i, j)
        if k == -1:
            i = j
            continue
        label = get_quoted_value(document.body, "reference", i, j)
        if not label:
            document.warning("Can't find label for reference at line %d!" %(i))
            i = j + 1
            continue
        # replace the whole inset by the raw label text in ERT
        document.body[i:j+1] = put_cmd_in_ert([label])
        i += 1
+
+
def revert_plural_refs(document):
    """Revert plural and capitalized (refstyle) references to ERT."""
    i = find_token(document.header, "\\use_refstyle 1", 0)
    # find_token returns -1 on a miss, so test against -1: the former
    # "i != 0" treated a missing header as enabled and a hit on header
    # line 0 as disabled.
    use_refstyle = (i != -1)

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue

        plural = caps = suffix = False
        # the inset parameter is spelled "LatexCommand", as in
        # revert_labelonly/revert_noprefix; "LaTeXCommand" never matches
        k = find_token(document.body, "LatexCommand formatted", i, j)
        if k != -1 and use_refstyle:
            plural = get_bool_value(document.body, "plural", i, j, False)
            caps = get_bool_value(document.body, "caps", i, j, False)
            label = get_quoted_value(document.body, "reference", i, j)
            if label:
                try:
                    (prefix, suffix) = label.split(":", 1)
                except ValueError:
                    # no ":" in the label: split returned a single element
                    document.warning("No `:' separator in formatted reference at line %d!" % (i))
            else:
                document.warning("Can't find label for reference at line %d!" % (i))

        # this effectively tests also for use_refstyle and a formatted reference
        # we do this complicated test because we would otherwise do this erasure
        # over and over and over
        if not ((plural or caps) and suffix):
            del_token(document.body, "plural", i, j)
            del_token(document.body, "caps", i, j - 1)  # since we deleted a line
            i = j - 1
            continue

        if caps:
            prefix = prefix[0].title() + prefix[1:]
        cmd = "\\" + prefix + "ref"
        if plural:
            cmd += "[s]"
        cmd += "{" + suffix + "}"
        document.body[i:j+1] = put_cmd_in_ert([cmd])
        i += 1
+
+
def revert_noprefix(document):
    " Revert labelonly tags with 'noprefix' set "
    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue
        # only labelonly references can carry noprefix
        k = find_token(document.body, "LatexCommand labelonly", i, j)
        if k == -1:
            i = j
            continue
        noprefix = get_bool_value(document.body, "noprefix", i, j)
        if not noprefix:
            # noprefix absent or false: just drop the new parameter
            del_token(document.body, "noprefix", i, j)
            i = j
            continue
        label = get_quoted_value(document.body, "reference", i, j)
        if not label:
            document.warning("Can't find label for reference at line %d!" %(i))
            i = j + 1
            continue
        try:
            (prefix, suffix) = label.split(":", 1)
        except:
            document.warning("No `:' separator in formatted reference at line %d!" % (i))
            # we'll leave this as an ordinary labelonly reference
            del_token(document.body, "noprefix", i, j)
            i = j
            continue
        # noprefix: emit only the part after the "prefix:" separator
        document.body[i:j+1] = put_cmd_in_ert([suffix])
        i += 1
+
##
# Conversion hub
[514, []],
[515, []],
[516, [convert_inputenc]],
- [517, []]
+ [517, []],
+ [518, [convert_iopart]],
+ [519, [convert_quotestyle]],
+ [520, []],
+ [521, [convert_frenchquotes]],
+ [522, []],
+ [523, []],
+ [524, []],
+ [525, []],
+ [526, []],
+ [527, []]
]
revert = [
+ [526, [revert_noprefix]],
+ [525, [revert_plural_refs]],
+ [524, [revert_labelonly]],
+ [523, [revert_crimson, revert_cochinealmath]],
+ [522, [revert_cjkquotes]],
+ [521, [revert_dynamicquotes]],
+ [520, [revert_britishquotes, revert_swedishgquotes, revert_frenchquotes, revert_frenchinquotes, revert_russianquotes, revert_swissquotes]],
+ [519, [revert_plainquote]],
+ [518, [revert_quotestyle]],
+ [517, [revert_iopart]],
[516, [revert_quotes]],
[515, []],
[514, [revert_urdu, revert_syriac]],