# Uncomment only what you need to import, please.
-#from parser_tools import find_token, find_end_of, find_tokens, \
-# find_token_exact, find_end_of_inset, find_end_of_layout, \
-# find_token_backwards, is_in_inset, get_value, get_quoted_value, \
-# del_token, check_token, get_option_value
+from parser_tools import find_end_of, find_token_backwards, find_end_of_layout, \
+ find_token, find_end_of_inset, get_value, get_bool_value, \
+ get_containing_layout, get_quoted_value, del_token
+# find_tokens, find_token_exact, is_in_inset, \
+# check_token, get_option_value
-from parser_tools import find_token, find_end_of_inset, get_value
-
-#from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert, get_ert, lyx2latex, \
+from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert
+# get_ert, lyx2latex, \
# lyx2verbatim, length_in_bp, convert_info_insets
# insert_to_preamble, latex_length, revert_flex_inset, \
# revert_font_attrs, hex2ratio, str2bool
-from lyx2lyx_tools import add_to_preamble, put_cmd_in_ert
-
####################################################################
# Private helper functions
i = find_token(document.header, "\\font_tt_scale" , 0)
if i == -1:
document.warning("Malformed LyX document: Can't find \\font_tt_scale.")
- return;
+ i = len(document.header) - 1
+
j = find_token(document.preamble, "\\usepackage{microtype}", 0)
if j == -1:
- document.header.insert(i + 1, "\\use_microtype 0")
+ document.header.insert(i + 1, "\\use_microtype false")
else:
- document.header.insert(i + 1, "\\use_microtype 1")
+ document.header.insert(i + 1, "\\use_microtype true")
del document.preamble[j]
i = find_token(document.header, "\\use_microtype", 0)
if i == -1:
return
- value = get_value(document.header, "\\use_microtype" , i).split()[0]
+ use_microtype = get_bool_value(document.header, "\\use_microtype" , i)
del document.header[i]
- if value == "1":
+ if use_microtype:
add_to_preamble(document, ["\\usepackage{microtype}"])
continue
def convert_inputenc(document):
    " Replace no longer supported input encoding settings. "
    # The prefix "\inputenc" also matches the full "\inputencoding" token.
    pos = find_token(document.header, "\\inputenc", 0)
    if pos == -1:
        return
    # Map the removed "pt254" encoding to "pt154".
    if get_value(document.header, "\\inputencoding", pos) == "pt254":
        document.header[pos] = "\\inputencoding pt154"
+
+
def convert_ibranches(document):
' Add "inverted 0" to branch insets'
i = 0
# these are the old lines telling us color, etc.
lines += document.header[i+2 : j+1]
document.header[i:i] = lines
-
+
+
def revert_beamer_article_styles(document):
    " Include (scr)article styles in beamer article "

    beamer_articles = ["article-beamer", "scrarticle-beamer"]
    if document.textclass not in beamer_articles:
        return

    # the layout the respective beamer article class is based on
    inclusion = "article.layout"
    if document.textclass == "scrarticle-beamer":
        inclusion = "scrartcl.layout"

    # make sure a local layout block exists; if not, create an empty one
    # right before the (mandatory) \language header
    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        k = find_token(document.header, "\\language", 0)
        if k == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
        i = k - 1

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document: Can't find end of local layout!")
        return

    # Insert the style definitions, bracketed by marker comments so that
    # convert_beamer_article_styles can remove them again.
    # FIX: backslashes are now consistently doubled ("\\makebeamertitle");
    # the old code relied on the invalid escape sequence "\m" surviving
    # verbatim, which raises a SyntaxWarning on recent Python versions.
    # The emitted lines are unchanged.
    document.header[i+1 : i+1] = [
        "### Inserted by lyx2lyx (more [scr]article styles) ###",
        "Input " + inclusion,
        "Input beamer.layout",
        "Provides geometry 0",
        "Provides hyperref 0",
        "DefaultFont",
        "     Family                Roman",
        "     Series                Medium",
        "     Shape                 Up",
        "     Size                  Normal",
        "     Color                 None",
        "EndFont",
        "Preamble",
        "  \\usepackage{beamerarticle,pgf}",
        "  % this default might be overridden by plain title style",
        "  \\newcommand\\makebeamertitle{\\frame{\\maketitle}}%",
        "  \\AtBeginDocument{",
        "    \\let\\origtableofcontents=\\tableofcontents",
        "    \\def\\tableofcontents{\\@ifnextchar[{\\origtableofcontents}{\\gobbletableofcontents}}",
        "    \\def\\gobbletableofcontents#1{\\origtableofcontents}",
        "  }",
        "EndPreamble",
        "### End of insertion by lyx2lyx (more [scr]article styles) ###"
    ]
+
+
def convert_beamer_article_styles(document):
    " Remove included (scr)article styles in beamer article "

    if document.textclass not in ("article-beamer", "scrarticle-beamer"):
        return

    start = find_token(document.header, "\\begin_local_layout", 0)
    if start == -1:
        return

    end = find_end_of(document.header, start, "\\begin_local_layout", "\\end_local_layout")
    if end == -1:
        # this should not happen
        document.warning("Malformed LyX document: Can't find end of local layout!")
        return

    # look for the markers left behind by revert_beamer_article_styles
    first = find_token(document.header, "### Inserted by lyx2lyx (more [scr]article styles) ###", start, end)
    if first != -1:
        last = find_token(document.header, "### End of insertion by lyx2lyx (more [scr]article styles) ###", start, end)
        if last == -1:
            # this should not happen
            document.warning("End of lyx2lyx local layout insertion not found!")
            return

        if first == start + 1 and last == end - 1:
            # our insertion was the entire local layout: drop the whole block
            document.header[start : end + 1] = []
        else:
            # remove just our insertion
            document.header[first : last + 1] = []
+
+
def _revert_language_babel(document, lang):
    """Revert a babel-only language to English while preserving output.

    Sets the document language to English, switches the language package
    to babel and passes `lang` as a class option so babel still loads it.
    Shared helper for the five identical revert_* functions below.
    """
    if document.language != lang:
        return
    document.language = "english"
    i = find_token(document.header, "\\language " + lang, 0)
    if i != -1:
        document.header[i] = "\\language english"
    j = find_token(document.header, "\\language_package default", 0)
    if j != -1:
        document.header[j] = "\\language_package babel"
    k = find_token(document.header, "\\options", 0)
    if k != -1:
        # prepend the language to the existing class options
        document.header[k] = document.header[k].replace("\\options", "\\options " + lang + ",")
    else:
        # no options header yet: add one right after \use_default_options
        l = find_token(document.header, "\\use_default_options", 0)
        document.header.insert(l + 1, "\\options " + lang)


def revert_bosnian(document):
    "Set the document language to English but assure Bosnian output"
    _revert_language_babel(document, "bosnian")


def revert_friulan(document):
    "Set the document language to English but assure Friulan output"
    _revert_language_babel(document, "friulan")


def revert_macedonian(document):
    "Set the document language to English but assure Macedonian output"
    _revert_language_babel(document, "macedonian")


def revert_piedmontese(document):
    "Set the document language to English but assure Piedmontese output"
    _revert_language_babel(document, "piedmontese")


def revert_romansh(document):
    "Set the document language to English but assure Romansh output"
    _revert_language_babel(document, "romansh")
+
+
def _revert_language_polyglossia(document, lang):
    """Revert a polyglossia-only language to English while preserving output.

    Sets the document language to English and re-enables `lang` via
    \\setotherlanguage in the preamble plus a \\resetdefaultlanguage ERT
    at the start of the body. Shared helper for the six identical
    revert_* functions below.
    """
    if document.language != lang:
        return
    document.language = "english"
    i = find_token(document.header, "\\language " + lang, 0)
    if i != -1:
        document.header[i] = "\\language english"
    j = find_token(document.header, "\\language_package default", 0)
    if j != -1:
        # keep the default language package (rewrites the line unchanged,
        # as the original per-language functions did)
        document.header[j] = "\\language_package default"
    # FIX: backslash properly escaped ("\\setotherlanguage"); the old code
    # relied on the invalid escape sequence "\s" surviving verbatim, which
    # raises a SyntaxWarning on recent Python versions. Same output line.
    add_to_preamble(document, ["\\AtBeginDocument{\\setotherlanguage{%s}}" % lang])
    document.body[2 : 2] = ["\\begin_layout Standard",
                            "\\begin_inset ERT", "status open", "",
                            "\\begin_layout Plain Layout", "", "",
                            "\\backslash",
                            "resetdefaultlanguage{%s}" % lang,
                            "\\end_layout", "", "\\end_inset", "", "",
                            "\\end_layout", ""]


def revert_amharic(document):
    "Set the document language to English but assure Amharic output"
    _revert_language_polyglossia(document, "amharic")


def revert_asturian(document):
    "Set the document language to English but assure Asturian output"
    _revert_language_polyglossia(document, "asturian")


def revert_kannada(document):
    "Set the document language to English but assure Kannada output"
    _revert_language_polyglossia(document, "kannada")


def revert_khmer(document):
    "Set the document language to English but assure Khmer output"
    _revert_language_polyglossia(document, "khmer")


def revert_urdu(document):
    "Set the document language to English but assure Urdu output"
    _revert_language_polyglossia(document, "urdu")


def revert_syriac(document):
    "Set the document language to English but assure Syriac output"
    _revert_language_polyglossia(document, "syriac")
+
+
def revert_quotes(document):
    " Revert Quote Insets in verbatim or Hebrew context to plain quotes "

    # Pass 1: quote insets inside verbatim-like insets (ERT, listings and
    # the URL/Chunk/Sweave/S-R flex insets) become plain " or ' characters.
    pos = 0
    while pos < len(document.body):
        tokens = document.body[pos].split()
        if len(tokens) > 1 and tokens[0] == "\\begin_inset" and \
           ( tokens[1] in ["ERT", "listings"] or ( len(tokens) > 2 and tokens[2] in ["URL", "Chunk", "Sweave", "S/R"]) ):
            end = find_end_of_inset(document.body, pos)
            if end == -1:
                document.warning("Malformed LyX document: Can't find end of " + tokens[1] + " inset at line " + str(pos))
                pos += 1
                continue
            while True:
                quote = find_token(document.body, '\\begin_inset Quotes', pos, end)
                if quote == -1:
                    pos += 1
                    break
                qend = find_end_of_inset(document.body, quote)
                if qend == -1:
                    document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(quote))
                    pos = quote
                    continue
                # inset names ending in "s" denote single quotes
                mark = "\""
                if document.body[quote].endswith("s"):
                    mark = "'"
                document.body[quote:qend+1] = [mark]
        else:
            pos += 1

    # Pass 2: same replacement inside verbatim-like layouts.
    pos = 0
    while pos < len(document.body):
        tokens = document.body[pos].split()
        if len(tokens) > 1 and tokens[0] == "\\begin_layout" and \
           tokens[1] in ["Verbatim", "Verbatim*", "Code", "Author_Email", "Author_URL"]:
            end = find_end_of_layout(document.body, pos)
            if end == -1:
                document.warning("Malformed LyX document: Can't find end of " + tokens[1] + " layout at line " + str(pos))
                pos += 1
                continue
            while True:
                quote = find_token(document.body, '\\begin_inset Quotes', pos, end)
                if quote == -1:
                    pos += 1
                    break
                qend = find_end_of_inset(document.body, quote)
                if qend == -1:
                    document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(quote))
                    pos = quote
                    continue
                mark = "\""
                if document.body[quote].endswith("s"):
                    mark = "'"
                document.body[quote:qend+1] = [mark]
        else:
            pos += 1

    # Pass 3: Hebrew. Only needed when the document (or some part of it)
    # is in Hebrew.
    if not document.language == "hebrew" and find_token(document.body, '\\lang hebrew', 0) == -1:
        return

    pos = 0
    while True:
        quote = find_token(document.body, '\\begin_inset Quotes', pos)
        if quote == -1:
            return
        qend = find_end_of_inset(document.body, quote)
        if qend == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(quote))
            pos = quote
            continue
        # determine the language in effect at the inset: the nearest
        # preceding \lang within the containing layout, else the document
        # language
        hebrew = False
        parent = get_containing_layout(document.body, quote)
        langline = find_token_backwards(document.body, "\\lang", quote)
        if langline == -1 or langline < parent[1]:
            hebrew = document.language == "hebrew"
        elif document.body[langline] == "\\lang hebrew":
            hebrew = True
        if hebrew:
            mark = "\""
            if document.body[quote].endswith("s"):
                mark = "'"
            document.body[quote:qend+1] = [mark]
        pos = qend
+
+
def revert_iopart(document):
    " Input new styles via local layout "
    if document.textclass != "iopart":
        return

    # make sure a local layout block exists; if not, create an empty one
    # right before the (mandatory) \language header
    begin = find_token(document.header, "\\begin_local_layout", 0)
    if begin == -1:
        lang = find_token(document.header, "\\language", 0)
        if lang == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        document.header[lang-1 : lang-1] = ["\\begin_local_layout", "\\end_local_layout"]
        begin = lang - 1

    end = find_end_of(document.header, begin, "\\begin_local_layout", "\\end_local_layout")
    if end == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    # pull in the standard layouts, bracketed by marker comments so that
    # convert_iopart can remove them again
    document.header[begin+1 : begin+1] = [
        "### Inserted by lyx2lyx (stdlayouts) ###",
        "Input stdlayouts.inc",
        "### End of insertion by lyx2lyx (stdlayouts) ###"
    ]
+
+
def convert_iopart(document):
    " Remove local layout we added, if it is there "
    if document.textclass != "iopart":
        return

    begin = find_token(document.header, "\\begin_local_layout", 0)
    if begin == -1:
        return

    end = find_end_of(document.header, begin, "\\begin_local_layout", "\\end_local_layout")
    if end == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    # look for the markers left behind by revert_iopart
    first = find_token(document.header, "### Inserted by lyx2lyx (stdlayouts) ###", begin, end)
    if first != -1:
        last = find_token(document.header, "### End of insertion by lyx2lyx (stdlayouts) ###", begin, end)
        if last == -1:
            # this should not happen
            document.warning("End of lyx2lyx local layout insertion not found!")
            return
        if first == begin + 1 and last == end - 1:
            # our insertion was the entire local layout: drop the whole block
            document.header[begin : end + 1] = []
        else:
            # remove just our insertion
            document.header[first : last + 1] = []
+
+
def convert_quotestyle(document):
    " Convert \\quotes_language to \\quotes_style "
    pos = find_token(document.header, "\\quotes_language", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Can't find \\quotes_language!")
        return
    style = get_value(document.header, "\\quotes_language", pos)
    document.header[pos] = "\\quotes_style " + style
+
+
def revert_quotestyle(document):
    " Revert \\quotes_style to \\quotes_language "
    pos = find_token(document.header, "\\quotes_style", 0)
    if pos == -1:
        document.warning("Malformed LyX document! Can't find \\quotes_style!")
        return
    style = get_value(document.header, "\\quotes_style", pos)
    document.header[pos] = "\\quotes_language " + style
+
+
def revert_plainquote(document):
    " Revert plain quote insets "

    # revert the header setting to english
    pos = find_token(document.header, "\\quotes_style plain", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style english"

    # replace plain quote insets ("q" type) with literal quote characters
    pos = 0
    while True:
        quote = find_token(document.body, '\\begin_inset Quotes q', pos)
        if quote == -1:
            return
        qend = find_end_of_inset(document.body, quote)
        if qend == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(quote))
            pos = quote
            continue
        # inset names ending in "s" denote single quotes
        mark = "'" if document.body[quote].endswith("s") else "\""
        document.body[quote:qend+1] = [mark]
        pos = qend
+
+
def convert_frenchquotes(document):
    " Convert french quote insets to swiss "

    # header setting
    pos = find_token(document.header, "\\quotes_style french", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style swiss"

    # insets: the style letter "f" becomes "c"
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes f', pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        document.body[pos] = document.body[pos].replace(code, code.replace("f", "c", 1))
        pos += 1
+
+
def revert_swissquotes(document):
    " Revert swiss quote insets to french "

    # header setting
    pos = find_token(document.header, "\\quotes_style swiss", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style french"

    # insets: the style letter "c" becomes "f"
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes c', pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        document.body[pos] = document.body[pos].replace(code, code.replace("c", "f", 1))
        pos += 1
+
+
def revert_britishquotes(document):
    " Revert british quote insets to english "

    # header setting
    pos = find_token(document.header, "\\quotes_style british", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style english"

    # insets: "b" becomes "e", and single/double are swapped
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes b', pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        new = code.replace("b", "e", 1)
        if code[2] == "d":
            # opening mark
            new = new.replace("d", "s")
        else:
            # closing mark
            new = new.replace("s", "d")
        document.body[pos] = document.body[pos].replace(code, new)
        pos += 1
+
+
def revert_swedishgquotes(document):
    " Revert swedish quote insets "

    # header setting
    pos = find_token(document.header, "\\quotes_style swedishg", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style danish"

    # insets
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes w', pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        if code[2] == "d":
            # outer marks
            new = code.replace("w", "a", 1).replace("r", "l")
        else:
            # inner marks
            new = code.replace("w", "s", 1)
        document.body[pos] = document.body[pos].replace(code, new)
        pos += 1
+
+
def revert_frenchquotes(document):
    " Revert french inner quote insets "

    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes f', pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        if code[2] == "s":
            # inner marks only: "f" becomes "e", single becomes double
            new = code.replace("f", "e", 1).replace("s", "d")
            document.body[pos] = document.body[pos].replace(code, new)
        pos += 1
+
+
def revert_frenchinquotes(document):
    " Revert inner frenchin quote insets "

    # header setting
    pos = find_token(document.header, "\\quotes_style frenchin", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style french"

    # insets: "i" becomes "f"; inner marks become double
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes i', pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        new = code.replace("i", "f", 1)
        if code[2] == "s":
            # inner marks
            new = new.replace("s", "d")
        document.body[pos] = document.body[pos].replace(code, new)
        pos += 1
+
+
def revert_russianquotes(document):
    " Revert russian quote insets "

    # header setting
    pos = find_token(document.header, "\\quotes_style russian", 0)
    if pos != -1:
        document.header[pos] = "\\quotes_style french"

    # insets
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes r', pos)
        if pos == -1:
            return
        code = get_value(document.body, "\\begin_inset Quotes", pos)[7:]
        if code[2] == "s":
            # inner marks: german, double
            new = code.replace("r", "g", 1).replace("s", "d")
        else:
            # outer marks: french
            new = code.replace("r", "f", 1)
        document.body[pos] = document.body[pos].replace(code, new)
        pos += 1
+
+
def revert_dynamicquotes(document):
    " Revert dynamic quote insets "

    # drop the header setting
    pos = find_token(document.header, "\\dynamic_quotes", 0)
    if pos != -1:
        del document.header[pos]

    # determine the global quote style
    style = "english"
    pos = find_token(document.header, "\\quotes_style", 0)
    if pos == -1:
        document.warning("Malformed document! Missing \\quotes_style")
    else:
        style = get_value(document.header, "\\quotes_style", pos)

    # map the style name to its one-letter inset code (default: english)
    s = {
        "english": "e",
        "swedish": "s",
        "german": "g",
        "polish": "p",
        "swiss": "c",
        "danish": "a",
        "plain": "q",
        "british": "b",
        "swedishg": "w",
        "french": "f",
        "frenchin": "i",
        "russian": "r",
    }.get(style, "e")

    # turn dynamic ("x") insets into insets of the global style
    pos = 0
    while True:
        pos = find_token(document.body, '\\begin_inset Quotes x', pos)
        if pos == -1:
            return
        document.body[pos] = document.body[pos].replace("x", s)
        pos += 1
+
+
def _revert_cjk_quote_insets(document, qtype, cjk_langs, cjk_marks, math_marks):
    """Replace all quote insets of the given type letter with plain CJK
    glyphs (when the surrounding text is in a CJK language) or with math
    formulas (elsewhere).

    Both mark lists are ordered: inner opening, inner closing, outer
    opening, outer closing.
    """
    pos = 0
    while True:
        quote = find_token(document.body, '\\begin_inset Quotes ' + qtype, pos)
        if quote == -1:
            return
        qend = find_end_of_inset(document.body, quote)
        if qend == -1:
            document.warning("Malformed LyX document: Can't find end of Quote inset at line " + str(quote))
            pos = quote
            continue
        # determine the language in effect at the inset: the nearest
        # preceding \lang within the containing layout, else the document
        # language
        cjk = False
        parent = get_containing_layout(document.body, quote)
        langline = find_token_backwards(document.body, "\\lang", quote)
        if langline == -1 or langline < parent[1]:
            cjk = document.language in cjk_langs
        elif document.body[langline].split()[1] in cjk_langs:
            cjk = True
        code = get_value(document.body, "\\begin_inset Quotes", quote)[7:]
        # select inner/outer (code[2]) and opening/closing (code[1]) mark
        idx = (0 if code[2] == "s" else 2) + (0 if code[1] == "l" else 1)
        if cjk:
            replacement = [cjk_marks[idx]]
        else:
            replacement = ["\\begin_inset Formula " + math_marks[idx], "\\end_inset"]
        document.body[quote:qend+1] = replacement
        pos = qend


def revert_cjkquotes(document):
    " Revert cjk quote insets "

    # determine the global quote style
    style = "english"
    i = find_token(document.header, "\\quotes_style", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\quotes_style")
    else:
        style = get_value(document.header, "\\quotes_style", i)

    if style.find("cjk") != -1:
        # reset the global style to english and turn dynamic ("x") insets
        # into the corresponding CJK inset type
        document.header[i] = "\\quotes_style english"
        subst = "k" if style == "cjkangle" else "j"
        pos = 0
        while True:
            pos = find_token(document.body, '\\begin_inset Quotes x', pos)
            if pos == -1:
                break
            document.body[pos] = document.body[pos].replace("x", subst)
            pos += 1

    cjk_langs = ["chinese-simplified", "chinese-traditional", "japanese", "japanese-cjk", "korean"]

    # corner bracket quotes (type "j")
    _revert_cjk_quote_insets(document, "j", cjk_langs,
        [u"\u300E", u"\u300F", u"\u300C", u"\u300D"],
        ["$\\llceil$", "$\\rrfloor$", "$\\lceil$", "$\\rfloor$"])

    # angle bracket quotes (type "k")
    _revert_cjk_quote_insets(document, "k", cjk_langs,
        [u"\u3008", u"\u3009",
         u"\u300A", u"\u300B"],
        ["$\\langle$", "$\\rangle$",
         "$\\langle\\kern -2.5pt\\langle$", "$\\rangle\\kern -2.5pt\\rangle$"])
+
+
def revert_crimson(document):
    " Revert native Cochineal/Crimson font definition to LaTeX "

    # only applies with TeX fonts
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    pos = find_token(document.header, "\\font_roman \"cochineal\"", 0)
    if pos == -1:
        return
    # old-style figures requested?
    osf_pos = find_token(document.header, "\\font_osf true", 0)
    if osf_pos != -1:
        document.header[osf_pos] = "\\font_osf false"
        preamble = "\\usepackage[proportional,osf]{cochineal}"
    else:
        preamble = "\\usepackage{cochineal}"
    add_to_preamble(document, [preamble])
    document.header[pos] = document.header[pos].replace("cochineal", "default")
+
+
def revert_cochinealmath(document):
    " Revert cochineal newtxmath definitions to LaTeX "

    # only applies with TeX fonts
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        i = find_token(document.header, "\\font_math \"cochineal-ntxm\"", 0)
        if i != -1:
            # pass the preamble line as a list, consistent with every other
            # add_to_preamble call in this file
            add_to_preamble(document, ["\\usepackage[cochineal]{newtxmath}"])
            document.header[i] = document.header[i].replace("cochineal-ntxm", "auto")
+
+
def revert_labelonly(document):
    " Revert labelonly tag for InsetRef "
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset CommandInset ref", pos)
        if pos == -1:
            return
        iend = find_end_of_inset(document.body, pos)
        if iend == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(pos))
            pos += 1
            continue
        if find_token(document.body, "LatexCommand labelonly", pos, iend) == -1:
            # not a labelonly reference
            pos = iend
            continue
        label = get_quoted_value(document.body, "reference", pos, iend)
        if not label:
            document.warning("Can't find label for reference at line %d!" %(pos))
            pos = iend + 1
            continue
        # replace the whole inset by the plain label in ERT
        document.body[pos:iend+1] = put_cmd_in_ert([label])
        pos += 1
+
+
def revert_plural_refs(document):
    " Revert plural and capitalized references "
    i = find_token(document.header, "\\use_refstyle 1", 0)
    # FIX: was (i != 0), which is also true when the header is absent
    # (find_token returns -1), so use_refstyle was effectively always on.
    use_refstyle = (i != -1)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset ref", i)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(i))
            i += 1
            continue

        plural = caps = suffix = False
        # FIX: LyX writes "LatexCommand" (cf. "LatexCommand labelonly"
        # elsewhere in this file); the old spelling "LaTeXCommand" never
        # matched, so formatted references were never reverted.
        k = find_token(document.body, "LatexCommand formatted", i, j)
        if k != -1 and use_refstyle:
            plural = get_bool_value(document.body, "plural", i, j, False)
            caps = get_bool_value(document.body, "caps", i, j, False)
            label = get_quoted_value(document.body, "reference", i, j)
            if label:
                try:
                    (prefix, suffix) = label.split(":", 1)
                except:
                    document.warning("No `:' separator in formatted reference at line %d!" % (i))
            else:
                document.warning("Can't find label for reference at line %d!" % (i))

        # this effectively tests also for use_refstyle and a formatted reference
        # we do this complicated test because we would otherwise do this erasure
        # over and over and over
        if not ((plural or caps) and suffix):
            del_token(document.body, "plural", i, j)
            del_token(document.body, "caps", i, j - 1) # since we deleted a line
            i = j - 1
            continue

        # rebuild the refstyle command in ERT
        if caps:
            prefix = prefix[0].title() + prefix[1:]
        cmd = "\\" + prefix + "ref"
        if plural:
            cmd += "[s]"
        cmd += "{" + suffix + "}"
        document.body[i:j+1] = put_cmd_in_ert([cmd])
        i += 1
+
+
def revert_noprefix(document):
    " Revert labelonly tags with 'noprefix' set "
    pos = 0
    while True:
        pos = find_token(document.body, "\\begin_inset CommandInset ref", pos)
        if pos == -1:
            return
        iend = find_end_of_inset(document.body, pos)
        if iend == -1:
            document.warning("Can't find end of reference inset at line %d!!" %(pos))
            pos += 1
            continue
        if find_token(document.body, "LatexCommand labelonly", pos, iend) == -1:
            # not a labelonly reference
            pos = iend
            continue
        if not get_bool_value(document.body, "noprefix", pos, iend):
            # noprefix not set: just drop the parameter
            del_token(document.body, "noprefix", pos, iend)
            pos = iend
            continue
        label = get_quoted_value(document.body, "reference", pos, iend)
        if not label:
            document.warning("Can't find label for reference at line %d!" %(pos))
            pos = iend + 1
            continue
        try:
            suffix = label.split(":", 1)[1]
        except IndexError:
            document.warning("No `:' separator in formatted reference at line %d!" % (pos))
            # we'll leave this as an ordinary labelonly reference
            del_token(document.body, "noprefix", pos, iend)
            pos = iend
            continue
        # replace the whole inset by the label part after the prefix, in ERT
        document.body[pos:iend+1] = put_cmd_in_ert([suffix])
        pos += 1
+
+
def revert_biblatex(document):
    """ Revert biblatex support.

    Resets the cite engine to natbib, folds the biblatex style/option
    headers into a raw \\usepackage{biblatex} preamble call, replaces
    bibtex insets with \\printbibliography ERT (preserving the inset
    inside a note), reverts biblatex-only citation commands to ERT, and
    pretends natbib support via a local layout so the citation styles
    keep working.
    """

    #
    # Header
    #

    # 1. Get cite engine
    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # 2. Store biblatex state and revert to natbib
    biblatex = False
    if engine in ["biblatex", "biblatex-natbib"]:
        biblatex = True
        # FIX: was "\cite_engine natbib" with a single backslash -- "\c" is
        # an invalid escape sequence (SyntaxWarning on modern Python), even
        # though it happens to yield the same characters.
        document.header[i] = "\\cite_engine natbib"

    # 3. Store and remove new document headers
    bibstyle = ""
    i = find_token(document.header, "\\biblatex_bibstyle", 0)
    if i != -1:
        bibstyle = get_value(document.header, "\\biblatex_bibstyle", i)
        del document.header[i]

    citestyle = ""
    i = find_token(document.header, "\\biblatex_citestyle", 0)
    if i != -1:
        citestyle = get_value(document.header, "\\biblatex_citestyle", i)
        del document.header[i]

    biblio_options = ""
    i = find_token(document.header, "\\biblio_options", 0)
    if i != -1:
        biblio_options = get_value(document.header, "\\biblio_options", i)
        del document.header[i]

    if biblatex:
        # Load biblatex directly, with natbib compatibility plus whatever
        # styles/options the document had set.
        bbxopts = "[natbib=true"
        if bibstyle != "":
            bbxopts += ",bibstyle=" + bibstyle
        if citestyle != "":
            bbxopts += ",citestyle=" + citestyle
        if biblio_options != "":
            bbxopts += "," + biblio_options
        bbxopts += "]"
        # add_to_preamble presumably accepts a plain string here (it is
        # called with a list elsewhere in this file) -- TODO confirm.
        add_to_preamble(document, "\\usepackage" + bbxopts + "{biblatex}")

    #
    # Body
    #

    # 1. Bibtex insets
    i = 0
    bibresources = []
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of bibtex inset at line %d!!" %(i))
            i += 1
            continue
        bibs = get_quoted_value(document.body, "bibfiles", i, j)
        opts = get_quoted_value(document.body, "biblatexopts", i, j)
        # store resources
        if bibs:
            bibresources += bibs.split(",")
        else:
            document.warning("Can't find bibfiles for bibtex inset at line %d!" %(i))
        # remove biblatexopts line
        k = find_token(document.body, "biblatexopts", i, j)
        if k != -1:
            del document.body[k]
        # Re-find inset end line (the deletion above may have shifted it)
        j = find_end_of_inset(document.body, i)
        # Insert ERT \\printbibliography and wrap bibtex inset to a Note
        if biblatex:
            pcmd = "printbibliography"
            if opts:
                pcmd += "[" + opts + "]"
            repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",
                    "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",
                    "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",
                    "status open", "", "\\begin_layout Plain Layout" ]
            repl += document.body[i:j+1]
            repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
            document.body[i:j+1] = repl
            # Skip past the inserted material. NOTE(review): the replacement
            # is 26 lines longer than the original inset, so += 27 overshoots
            # the new end by one line; harmless for well-formed documents.
            j += 27

        i = j + 1

    if biblatex:
        for b in bibresources:
            add_to_preamble(document, "\\addbibresource{" + b + ".bib}")

    # 2. Citation insets

    # Specific citation insets used in biblatex that need to be reverted to ERT
    new_citations = {
        "Cite" : "Cite",
        "citebyear" : "citeyear",
        "citeyear" : "cite*",
        "Footcite" : "Smartcite",
        "footcite" : "smartcite",
        "Autocite" : "Autocite",
        "autocite" : "autocite",
        "citetitle" : "citetitle",
        "citetitle*" : "citetitle*",
        "fullcite" : "fullcite",
        "footfullcite" : "footfullcite",
        "supercite" : "supercite",
        "citeauthor" : "citeauthor",
        "citeauthor*" : "citeauthor*",
        "Citeauthor" : "Citeauthor",
        "Citeauthor*" : "Citeauthor*"
        }

    # All commands accepted by LyX < 2.3. Everything else throws an error.
    old_citations = [ "cite", "nocite", "citet", "citep", "citealt", "citealp",
                      "citeauthor", "citeyear", "citeyearpar", "citet*", "citep*",
                      "citealt*", "citealp*", "citeauthor*", "Citet", "Citep",
                      "Citealt", "Citealp", "Citeauthor", "Citet*", "Citep*",
                      "Citealt*", "Citealp*", "Citeauthor*", "fullcite", "footcite",
                      "footcitet", "footcitep", "footcitealt", "footcitealp",
                      "footciteauthor", "footciteyear", "footciteyearpar",
                      "citefield", "citetitle", "cite*" ]

    i = 0
    while (True):
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" %(i))
            i += 1
            continue
        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
            i = j + 1
            continue
        cmd = get_value(document.body, "LatexCommand", k)
        if biblatex and cmd in list(new_citations.keys()):
            pre = get_quoted_value(document.body, "before", i, j)
            post = get_quoted_value(document.body, "after", i, j)
            key = get_quoted_value(document.body, "key", i, j)
            if not key:
                document.warning("Citation inset at line %d does not have a key!" %(i))
                key = "???"
            # Replace known new commands with ERT
            res = "\\" + new_citations[cmd]
            if pre:
                res += "[" + pre + "]"
            elif post:
                # biblatex syntax: a single optional arg is the postnote, so
                # an empty prenote placeholder is required when only a
                # postnote exists
                res += "[]"
            if post:
                res += "[" + post + "]"
            res += "{" + key + "}"
            document.body[i:j+1] = put_cmd_in_ert([res])
        elif cmd not in old_citations:
            # Reset unknown commands to cite. This is what LyX does as well
            # (but LyX 2.2 would break on unknown commands)
            document.body[k] = "LatexCommand cite"
            document.warning("Reset unknown cite command '%s' with cite" % cmd)
        i = j + 1

    # Emulate the old biblatex-workaround (pretend natbib in order to use the styles)
    if biblatex:
        i = find_token(document.header, "\\begin_local_layout", 0)
        if i == -1:
            # No local layout yet: create an empty one just before \language.
            k = find_token(document.header, "\\language", 0)
            if k == -1:
                # this should not happen
                document.warning("Malformed LyX document! No \\language header found!")
                return
            document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
            i = k-1

        j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
        if j == -1:
            # this should not happen
            document.warning("Malformed LyX document! Can't find end of local layout!")
            return

        document.header[i+1 : i+1] = [
            "### Inserted by lyx2lyx (biblatex emulation) ###",
            "Provides natbib 1",
            "### End of insertion by lyx2lyx (biblatex emulation) ###"
        ]
+
+
def revert_citekeyonly(document):
    " Revert keyonly cite command to ERT "

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset citation", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" %(i))
            i += 1
            continue
        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
            i = j + 1
            continue
        cmd = get_value(document.body, "LatexCommand", k)
        if cmd != "keyonly":
            i = j + 1
            continue

        key = get_quoted_value(document.body, "key", i, j)
        if not key:
            document.warning("Citation inset at line %d does not have a key!" %(i))
            # FIX: fall back to a visible placeholder instead of emitting an
            # empty ERT inset; matches the handling in revert_biblatex.
            key = "???"
        # Replace the whole inset with the bare key as ERT
        document.body[i:j+1] = put_cmd_in_ert([key])
        i = j + 1
+
+
+
def revert_bibpackopts(document):
    """ Revert support for natbib/jurabib package options.

    Moves the \\biblio_options header value into a local-layout
    "PackageOptions <engine> <options>" statement so older LyX versions
    still pass the options to the bibliography package.
    """

    engine = "basic"
    i = find_token(document.header, "\\cite_engine", 0)
    if i == -1:
        document.warning("Malformed document! Missing \\cite_engine")
    else:
        engine = get_value(document.header, "\\cite_engine", i)

    # Only the natbib and jurabib engines take package options here.
    if engine not in ["natbib", "jurabib"]:
        return

    biblio_options = ""
    i = find_token(document.header, "\\biblio_options", 0)
    if i != -1:
        biblio_options = get_value(document.header, "\\biblio_options", i)
        del document.header[i]

    # FIX: without this guard an empty/absent \biblio_options still produced
    # a malformed "PackageOptions <engine> " local-layout line. (Also removed
    # the dead local `biblatex = False`, which was never read.)
    if not biblio_options:
        return

    i = find_token(document.header, "\\begin_local_layout", 0)
    if i == -1:
        # No local layout yet: create an empty one just before \language.
        k = find_token(document.header, "\\language", 0)
        if k == -1:
            # this should not happen
            document.warning("Malformed LyX document! No \\language header found!")
            return
        document.header[k-1 : k-1] = ["\\begin_local_layout", "\\end_local_layout"]
        i = k - 1

    j = find_end_of(document.header, i, "\\begin_local_layout", "\\end_local_layout")
    if j == -1:
        # this should not happen
        document.warning("Malformed LyX document! Can't find end of local layout!")
        return

    document.header[i+1 : i+1] = [
        "### Inserted by lyx2lyx (bibliography package options) ###",
        "PackageOptions " + engine + " " + biblio_options,
        "### End of insertion by lyx2lyx (bibliography package options) ###"
    ]
+
##
# Conversion hub
convert = [
[509, [convert_microtype]],
[510, [convert_dateinset]],
- [511, [convert_ibranches]]
+ [511, [convert_ibranches]],
+ [512, [convert_beamer_article_styles]],
+ [513, []],
+ [514, []],
+ [515, []],
+ [516, [convert_inputenc]],
+ [517, []],
+ [518, [convert_iopart]],
+ [519, [convert_quotestyle]],
+ [520, []],
+ [521, [convert_frenchquotes]],
+ [522, []],
+ [523, []],
+ [524, []],
+ [525, []],
+ [526, []],
+ [527, []],
+ [528, []],
+ [529, []],
+ [530, []]
]
revert = [
+ [529, [revert_bibpackopts]],
+ [528, [revert_citekeyonly]],
+ [527, [revert_biblatex]],
+ [526, [revert_noprefix]],
+ [525, [revert_plural_refs]],
+ [524, [revert_labelonly]],
+ [523, [revert_crimson, revert_cochinealmath]],
+ [522, [revert_cjkquotes]],
+ [521, [revert_dynamicquotes]],
+ [520, [revert_britishquotes, revert_swedishgquotes, revert_frenchquotes, revert_frenchinquotes, revert_russianquotes, revert_swissquotes]],
+ [519, [revert_plainquote]],
+ [518, [revert_quotestyle]],
+ [517, [revert_iopart]],
+ [516, [revert_quotes]],
+ [515, []],
+ [514, [revert_urdu, revert_syriac]],
+ [513, [revert_amharic, revert_asturian, revert_kannada, revert_khmer]],
+ [512, [revert_bosnian, revert_friulan, revert_macedonian, revert_piedmontese, revert_romansh]],
+ [511, [revert_beamer_article_styles]],
[510, [revert_ibranches]],
[509, []],
[508, [revert_microtype]]