1 # This file is part of lyx2lyx
2 # Copyright (C) 2018 The LyX team
4 # This program is free software; you can redistribute it and/or
5 # modify it under the terms of the GNU General Public License
6 # as published by the Free Software Foundation; either version 2
7 # of the License, or (at your option) any later version.
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
14 # You should have received a copy of the GNU General Public License
15 # along with this program; if not, write to the Free Software
16 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 """Convert files to the file format generated by lyx 2.4"""
21 from datetime import date, datetime, time
23 from lyx2lyx_tools import (
32 from parser_tools import (
45 get_containing_layout,
52 ####################################################################
53 # Private helper functions
def add_preamble_fonts(document, fontmap):
    """Add collected font-packages with their options to the user preamble.

    fontmap maps a LaTeX package name to the list of package options
    gathered by revert_fonts(); one \\usepackage line is emitted per
    package via add_to_preamble().
    """
    # Fix: the visible body referenced `pkg` without iterating fontmap and
    # had no fallback when a package carries no options; restore the loop
    # and the empty-option branch so every package is emitted correctly.
    for pkg in fontmap:
        if len(fontmap[pkg]) > 0:
            xoption = "[" + ",".join(fontmap[pkg]) + "]"
        else:
            xoption = ""
        preamble = f"\\usepackage{xoption}{{{pkg}}}"
        add_to_preamble(document, [preamble])
def createkey(pkg, options):
    """Build the canonical lookup key for a package/options combination."""
    joined_opts = "-".join(options)
    return f"{pkg}:{joined_opts}"
75 self.fontname = None # key into font2pkgmap
76 self.fonttype = None # roman,sans,typewriter,math
77 self.scaletype = None # None,sf,tt
78 self.scaleopt = None # None, 'scaled', 'scale'
82 self.pkgkey = None # key into pkg2fontmap
83 self.osfopt = None # None, string
84 self.osfdef = "false" # "false" or "true"
87 self.pkgkey = createkey(self.package, self.options)
92 self.font2pkgmap = dict()
93 self.pkg2fontmap = dict()
94 self.pkginmap = dict() # defines, if a map for package exists
# NOTE(review): sampled view -- the parameter list and several body lines
# are elided from this chunk; code is kept verbatim and only annotated.
# Registers one fontinfo entry per font name in both directions
# (font name -> fontinfo, and package key -> font name).
96 def expandFontMapping(
106 """Expand fontinfo mapping"""
108 # fontlist: list of fontnames, each element
109 # may contain a ','-separated list of needed options
110 # like e.g. 'IBMPlexSansCondensed,condensed'
111 # font_type: one of 'roman', 'sans', 'typewriter', 'math'
112 # scale_type: one of None, 'sf', 'tt'
113 # pkg: package defining the font. Defaults to fontname if None
114 # scaleopt: one of None, 'scale', 'scaled', or some other string
115 # to be used in scale option (e.g. scaled=0.7)
116 # osfopt: None or some other string to be used in osf option
117 # osfdef: "true" if osf is default
120 fe.fonttype = font_type
121 fe.scaletype = scale_type
124 fe.fontname = font_name
126 fe.scaleopt = scaleopt
# When no explicit pkg was given the package defaults to the font name.
130 fe.package = font_name
# Register the entry in both lookup maps; duplicate package keys must
# map back to the same font name, otherwise the table is inconsistent.
134 self.font2pkgmap[font_name] = fe
135 if fe.pkgkey in self.pkg2fontmap:
136 # Repeated the same entry? Check content
137 if self.pkg2fontmap[fe.pkgkey] != font_name:
138 document.error("Something is wrong in pkgname+options <-> fontname mapping")
139 self.pkg2fontmap[fe.pkgkey] = font_name
140 self.pkginmap[fe.package] = 1
# NOTE(review): sampled view -- the early-return lines are elided; code is
# kept verbatim and only annotated.
# Maps a LaTeX package name + option list back to the LyX font name via
# the key built by createkey(); presumably returns None on any mismatch
# (elided lines) -- verify against upstream lyx_2_4.py.
142 def getfontname(self, pkg, options):
144 pkgkey = createkey(pkg, options)
145 if pkgkey not in self.pkg2fontmap:
147 fontname = self.pkg2fontmap[pkgkey]
# Cross-check: the reverse map must know this font, and its stored
# pkgkey must round-trip to the one we derived.
148 if fontname not in self.font2pkgmap:
# NOTE(review): `document` is not a parameter here -- looks like a
# module-global reference; confirm against caller.
149 document.error("Something is wrong in pkgname+options <-> fontname mapping")
151 if pkgkey == self.font2pkgmap[fontname].pkgkey:
# NOTE(review): sampled view -- the Font2pkgmap construction, several
# font-list entries and closing parentheses are elided; code is kept
# verbatim and only annotated.
# Builds a Font2pkgmap populated with the known TeX font families
# requested in `fontlist` (e.g. ["DejaVu", "IBM"]).
156 def createFontMapping(fontlist):
157 # Create info for known fonts for the use in
158 # convert_latexFonts() and
159 # revert_latexFonts()
161 # * Would be more handy to parse latexFonts file,
162 # but the path to this file is unknown
163 # * For now, add DejaVu and IBMPlex only.
164 # * Expand, if desired
166 for font in fontlist:
168 fm.expandFontMapping(["DejaVuSerif", "DejaVuSerifCondensed"], "roman", None, None)
169 fm.expandFontMapping(
170 ["DejaVuSans", "DejaVuSansCondensed"], "sans", "sf", None, "scaled"
172 fm.expandFontMapping(["DejaVuSansMono"], "typewriter", "tt", None, "scaled")
# IBM Plex serif family: each entry may append a ','-separated option.
174 fm.expandFontMapping(
177 "IBMPlexSerifThin,thin",
178 "IBMPlexSerifExtraLight,extralight",
179 "IBMPlexSerifLight,light",
180 "IBMPlexSerifSemibold,semibold",
186 fm.expandFontMapping(
189 "IBMPlexSansCondensed,condensed",
190 "IBMPlexSansThin,thin",
191 "IBMPlexSansExtraLight,extralight",
192 "IBMPlexSansLight,light",
193 "IBMPlexSansSemibold,semibold",
200 fm.expandFontMapping(
203 "IBMPlexMonoThin,thin",
204 "IBMPlexMonoExtraLight,extralight",
205 "IBMPlexMonoLight,light",
206 "IBMPlexMonoSemibold,semibold",
213 elif font == "Adobe":
214 fm.expandFontMapping(
215 ["ADOBESourceSerifPro"], "roman", None, "sourceserifpro", None, "osf"
217 fm.expandFontMapping(
218 ["ADOBESourceSansPro"], "sans", "sf", "sourcesanspro", "scaled", "osf"
220 fm.expandFontMapping(
221 ["ADOBESourceCodePro"],
229 fm.expandFontMapping(
231 "NotoSerifRegular,regular",
232 "NotoSerifMedium,medium",
233 "NotoSerifThin,thin",
234 "NotoSerifLight,light",
235 "NotoSerifExtralight,extralight",
243 fm.expandFontMapping(
245 "NotoSansRegular,regular",
246 "NotoSansMedium,medium",
248 "NotoSansLight,light",
249 "NotoSansExtralight,extralight",
256 fm.expandFontMapping(
257 ["NotoMonoRegular,regular"], "typewriter", "tt", "noto-mono", "scaled"
259 elif font == "Cantarell":
260 fm.expandFontMapping(
261 ["cantarell,defaultsans"],
268 elif font == "Chivo":
269 fm.expandFontMapping(
274 "ChivoMedium,medium",
282 elif font == "CrimsonPro":
283 fm.expandFontMapping(
286 "CrimsonProExtraLight,extralight",
287 "CrimsonProLight,light",
288 "CrimsonProMedium,medium",
298 fm.expandFontMapping(
303 "FiraSansLight,light",
304 "FiraSansExtralight,extralight",
305 "FiraSansUltralight,ultralight",
314 fm.expandFontMapping(
315 ["FiraMono"], "typewriter", "tt", "FiraMono", "scaled", "lf", "true"
317 elif font == "libertinus":
318 fm.expandFontMapping(["libertinus,serif"], "roman", None, "libertinus", None, "osf")
319 fm.expandFontMapping(
320 ["libertinusmath"], "math", None, "libertinust1math", None, None
# NOTE(review): sampled view -- loop headers, guards and several branches
# are elided; code is kept verbatim and only annotated.
# Scans the LaTeX preamble for \usepackage font lines known to `fm` and
# rewrites them as native LyX header settings (font name, scale, osf,
# and -- for format > 580 -- font options).
325 def convert_fonts(document, fm, osfoption="osf"):
326 """Handle font definition (LaTeX preamble -> native)"""
327 rpkg = re.compile(r"^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}")
328 rscaleopt = re.compile(r"^scaled?=(.*)")
330 # Check whether we go beyond font option feature introduction
331 haveFontOpts = document.end_format > 580
335 i = find_re(document.preamble, rpkg, i + 1)
338 mo = rpkg.search(document.preamble[i])
339 if mo == None or mo.group(2) == None:
# Normalize the bracketed option list into individual options.
342 options = mo.group(2).replace(" ", "").split(",")
347 while o < len(options):
348 if options[o] == osfoption:
352 mo = rscaleopt.search(options[o])
360 if pkg not in fm.pkginmap:
365 # Try with name-option combination first
366 # (only one default option supported currently)
368 while o < len(options):
370 fn = fm.getfontname(pkg, [opt])
377 fn = fm.getfontname(pkg, [])
379 fn = fm.getfontname(pkg, options)
382 del document.preamble[i]
383 fontinfo = fm.font2pkgmap[fn]
384 if fontinfo.scaletype == None:
387 fontscale = "\\font_" + fontinfo.scaletype + "_scale"
388 fontinfo.scaleval = oscale
389 if (has_osf and fontinfo.osfdef == "false") or (
390 not has_osf and fontinfo.osfdef == "true"
392 if fontinfo.osfopt == None:
# NOTE(review): extend() with a string appends single characters, not
# the whole option -- looks suspicious; verify against upstream.
393 options.extend(osfoption)
395 osf = find_token(document.header, "\\font_osf false")
396 osftag = "\\font_osf"
397 if osf == -1 and fontinfo.fonttype != "math":
398 # Try with newer format
399 osftag = "\\font_" + fontinfo.fonttype + "_osf"
400 osf = find_token(document.header, osftag + " false")
402 document.header[osf] = osftag + " true"
# Drop the marker comment lyx2lyx left above the converted line.
403 if i > 0 and document.preamble[i - 1] == "% Added by lyx2lyx":
404 del document.preamble[i - 1]
406 if fontscale != None:
407 j = find_token(document.header, fontscale, 0)
409 val = get_value(document.header, fontscale, j)
413 scale = "%03d" % int(float(oscale) * 100)
414 document.header[j] = fontscale + " " + scale + " " + vals[1]
415 ft = "\\font_" + fontinfo.fonttype
416 j = find_token(document.header, ft, 0)
418 val = get_value(document.header, ft, j)
419 words = val.split() # ! splits also values like '"DejaVu Sans"'
420 words[0] = '"' + fn + '"'
421 document.header[j] = ft + " " + " ".join(words)
422 if haveFontOpts and fontinfo.fonttype != "math":
423 fotag = "\\font_" + fontinfo.fonttype + "_opts"
424 fo = find_token(document.header, fotag)
426 document.header[fo] = fotag + ' "' + ",".join(options) + '"'
428 # Sensible place to insert tag
429 fo = find_token(document.header, "\\font_sf_scale")
431 document.warning("Malformed LyX document! Missing \\font_sf_scale")
433 document.header.insert(fo, fotag + ' "' + ",".join(options) + '"')
# NOTE(review): sampled view -- loop setup, guards and several branches
# are elided; code is kept verbatim and only annotated.
# Walks the header \font_* settings, resets fonts known to `fm` back to
# "default", and accumulates the LaTeX package options into `fontmap`
# for add_preamble_fonts().
436 def revert_fonts(document, fm, fontmap, OnlyWithXOpts=False, WithXOpts=False):
437 """Revert native font definition to LaTeX"""
438 # fonlist := list of fonts created from the same package
439 # Empty package means that the font-name is the same as the package-name
440 # fontmap (key = package, val += found options) will be filled
441 # and used later in add_preamble_fonts() to be added to user-preamble
443 rfontscale = re.compile(r"^\s*(\\font_(roman|sans|typewriter|math))\s+")
444 rscales = re.compile(r"^\s*(\d+)\s+(\d+)")
446 while i < len(document.header):
447 i = find_re(document.header, rfontscale, i + 1)
450 mo = rfontscale.search(document.header[i])
453 ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
454 val = get_value(document.header, ft, i)
455 words = val.split(" ") # ! splits also values like '"DejaVu Sans"'
456 font = words[0].strip('"') # TeX font name has no whitespace
457 if font not in fm.font2pkgmap:
459 fontinfo = fm.font2pkgmap[font]
460 val = fontinfo.package
461 if val not in fontmap:
# Optionally also pick up the matching \font_*_opts line and fold its
# options into the package options.
464 if OnlyWithXOpts or WithXOpts:
465 if ft == "\\font_math":
467 regexp = re.compile(r"^\s*(\\font_roman_opts)\s+")
468 if ft == "\\font_sans":
469 regexp = re.compile(r"^\s*(\\font_sans_opts)\s+")
470 elif ft == "\\font_typewriter":
471 regexp = re.compile(r"^\s*(\\font_typewriter_opts)\s+")
472 x = find_re(document.header, regexp, 0)
473 if x == -1 and OnlyWithXOpts:
477 # We need to use this regex since split() does not handle quote protection
478 xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
479 opts = xopts[1].strip('"').split(",")
480 fontmap[val].extend(opts)
481 del document.header[x]
# Reset the header entry to the default font, keeping trailing words.
482 words[0] = '"default"'
483 document.header[i] = ft + " " + " ".join(words)
484 if fontinfo.scaleopt != None:
485 xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
486 mo = rscales.search(xval)
490 # set correct scale option
492 [fontinfo.scaleopt + "=" + format(float(xval1) / 100, ".2f")]
494 if fontinfo.osfopt != None:
496 if fontinfo.osfdef == "true":
498 osf = find_token(document.header, "\\font_osf " + oldval)
499 if osf == -1 and ft != "\\font_math":
500 # Try with newer format
501 osftag = "\\font_roman_osf " + oldval
502 if ft == "\\font_sans":
503 osftag = "\\font_sans_osf " + oldval
504 elif ft == "\\font_typewriter":
505 osftag = "\\font_typewriter_osf " + oldval
506 osf = find_token(document.header, osftag)
508 fontmap[val].extend([fontinfo.osfopt])
509 if len(fontinfo.options) > 0:
510 fontmap[val].extend(fontinfo.options)
514 ###############################################################################
516 ### Conversion and reversion routines
518 ###############################################################################
def convert_inputencoding_namechange(document):
    """Rename inputencoding settings (auto -> auto-legacy,
    default -> auto-legacy-plain)."""
    i = find_token(document.header, "\\inputencoding", 0)
    # Guard: without it, a missing \inputencoding line (i == -1) would
    # silently rewrite the last header entry.
    if i == -1:
        return
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
    """Rename inputencoding settings back (auto-legacy-plain -> default,
    auto-legacy -> auto)."""
    i = find_token(document.header, "\\inputencoding", 0)
    # Guard: without it, a missing \inputencoding line (i == -1) would
    # silently rewrite the last header entry.
    if i == -1:
        return
    # Replace the longer name first so "auto-legacy" inside
    # "auto-legacy-plain" is not clobbered.
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")
def convert_notoFonts(document):
    """Handle Noto fonts definition to LaTeX"""
    # Only documents typeset with TeX fonts carry preamble font code
    # that needs converting; system-font documents are left alone.
    uses_tex_fonts = not get_bool_value(document.header, "\\use_non_tex_fonts")
    if uses_tex_fonts:
        convert_fonts(document, createFontMapping(["Noto"]))
def revert_notoFonts(document):
    """Revert native Noto font definition to LaTeX"""
    # Fix: fontmap was used below without being initialized in the
    # visible code; it collects package -> options for the preamble.
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fontmap = dict()
        fm = createFontMapping(["Noto"])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    """Handle DejaVu and IBMPlex fonts definition to LaTeX"""
    # Nothing to convert when the document uses non-TeX (system) fonts.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(["DejaVu", "IBM"])
    convert_fonts(document, mapping)
def revert_latexFonts(document):
    """Revert native DejaVu font definition to LaTeX"""
    # Fix: fontmap was used below without being initialized in the
    # visible code; it collects package -> options for the preamble.
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fontmap = dict()
        fm = createFontMapping(["DejaVu", "IBM"])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    """Handle Adobe Source fonts definition to LaTeX"""
    # Nothing to convert when the document uses non-TeX (system) fonts.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(["Adobe"])
    convert_fonts(document, mapping)
def revert_AdobeFonts(document):
    """Revert Adobe Source font definition to LaTeX"""
    # Fix: fontmap was used below without being initialized in the
    # visible code; it collects package -> options for the preamble.
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fontmap = dict()
        fm = createFontMapping(["Adobe"])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
# NOTE(review): sampled view -- loop setup and several guards are elided;
# code is kept verbatim and only annotated.
# Deletes Begin/EndFrontmatter layouts (including trailing blank lines)
# from the document body.
593 def removeFrontMatterStyles(document):
594 """Remove styles Begin/EndFrontmatter"""
596 layouts = ["BeginFrontmatter", "EndFrontmatter"]
597 tokenend = len("\\begin_layout ")
600 i = find_token_exact(document.body, "\\begin_layout ", i + 1)
# Extract the layout name that follows "\begin_layout ".
603 layout = document.body[i][tokenend:].strip()
604 if layout not in layouts:
606 j = find_end_of_layout(document.body, i)
608 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Swallow blank lines following the layout before deleting the span.
610 while document.body[j + 1].strip() == "":
612 document.body[i : j + 1] = []
# NOTE(review): sampled view -- the inserted note text, loop setup and
# several guards are elided; code is kept verbatim and only annotated.
# Wraps the elsarticle front-matter region in Begin/EndFrontmatter
# layouts, via the nested insertFrontmatter() helper.
615 def addFrontMatterStyles(document):
616 """Use styles Begin/EndFrontmatter for elsarticle"""
618 if document.textclass != "elsarticle":
# Helper: splice a {prefix}Frontmatter layout (with an explanatory
# note inset) into the body at the given line, trimming blank lines.
621 def insertFrontmatter(prefix, line):
623 while above > 0 and document.body[above - 1].strip() == "":
626 while document.body[below].strip() == "":
628 document.body[above:below] = [
630 "\\begin_layout " + prefix + "Frontmatter",
631 "\\begin_inset Note Note",
634 "\\begin_layout Plain Layout",
650 "Corresponding author",
656 tokenend = len("\\begin_layout ")
660 i = find_token_exact(document.body, "\\begin_layout ", i + 1)
663 layout = document.body[i][tokenend:].strip()
664 if layout not in layouts:
666 k = find_end_of_layout(document.body, i)
668 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Insert End first so the earlier Begin insertion does not shift k.
675 insertFrontmatter("End", k + 1)
676 insertFrontmatter("Begin", first)
# NOTE(review): sampled view -- loop setup and guards are elided; code is
# kept verbatim and only annotated.
# Adds a literal "true" parameter line to every include command inset.
679 def convert_lst_literalparam(document):
680 """Add param literal to include inset"""
684 i = find_token(document.body, "\\begin_inset CommandInset include", i + 1)
687 j = find_end_of_inset(document.body, i)
690 "Malformed LyX document: Can't find end of command inset at line %d" % i
# Skip past the inset's parameter lines to the first blank line, then
# insert the new parameter there.
693 while i < j and document.body[i].strip() != "":
695 document.body.insert(i, 'literal "true"')
# NOTE(review): sampled view -- loop setup and guards are elided; code is
# kept verbatim and only annotated.
# Removes the literal parameter line from every include command inset.
698 def revert_lst_literalparam(document):
699 """Remove param literal from include inset"""
703 i = find_token(document.body, "\\begin_inset CommandInset include", i + 1)
706 j = find_end_of_inset(document.body, i)
709 "Malformed LyX document: Can't find end of include inset at line %d" % i
712 del_token(document.body, "literal", i, j)
# NOTE(review): sampled view -- several guards and branches are elided;
# code is kept verbatim and only annotated.
# Replaces the native ParaType font settings (PTSerif/PTSans/PTMono)
# with explicit \usepackage preamble code, carrying over scale factors.
715 def revert_paratype(document):
716 """Revert ParaType font definitions to LaTeX"""
718 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
719 i1 = find_token(document.header, '\\font_roman "PTSerif-TLF"', 0)
720 i2 = find_token(document.header, '\\font_sans "default"', 0)
721 i3 = find_token(document.header, '\\font_typewriter "default"', 0)
722 j = find_token(document.header, '\\font_sans "PTSans-TLF"', 0)
725 sfval = find_token(document.header, "\\font_sf_scale", 0)
727 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
729 sfscale = document.header[sfval].split()
732 document.header[sfval] = " ".join(sfscale)
735 sf_scale = float(val)
737 document.warning("Invalid font_sf_scale value: " + val)
# NOTE(review): comparing a float to the string "100.0" is always True
# in Python 3 -- looks like a latent bug; verify against upstream.
740 if sf_scale != "100.0":
741 sfoption = "scaled=" + str(sf_scale / 100.0)
742 k = find_token(document.header, '\\font_typewriter "PTMono-TLF"', 0)
743 ttval = get_value(document.header, "\\font_tt_scale", 0)
748 ttoption = "scaled=" + format(float(ttval) / 100, ".2f")
# All three families set: the combined paratype package suffices.
749 if i1 != -1 and i2 != -1 and i3 != -1:
750 add_to_preamble(document, ["\\usepackage{paratype}"])
753 add_to_preamble(document, ["\\usepackage{PTSerif}"])
754 document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
757 add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
759 add_to_preamble(document, ["\\usepackage{PTSans}"])
760 document.header[j] = document.header[j].replace("PTSans-TLF", "default")
763 add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
765 add_to_preamble(document, ["\\usepackage{PTMono}"])
766 document.header[k] = document.header[k].replace("PTMono-TLF", "default")
# NOTE(review): sampled view -- guards and the options construction are
# elided; code is kept verbatim and only annotated.
# Reverts the native xcharter roman font setting to \usepackage code,
# transferring the old-style-figures flag into the package options.
769 def revert_xcharter(document):
770 """Revert XCharter font definitions to LaTeX"""
772 i = find_token(document.header, '\\font_roman "xcharter"', 0)
776 # replace unsupported font setting
777 document.header[i] = document.header[i].replace("xcharter", "default")
778 # no need for preamble code with system fonts
779 if get_bool_value(document.header, "\\use_non_tex_fonts"):
782 # transfer old style figures setting to package options
783 j = find_token(document.header, "\\font_osf true")
786 document.header[j] = "\\font_osf false"
790 add_to_preamble(document, ["\\usepackage%s{XCharter}" % options])
# NOTE(review): sampled view -- loop setup and guards are elided; code is
# kept verbatim and only annotated.
# Converts Flex Landscape insets back into raw landscape environment ERT
# and removes the module from the document.
793 def revert_lscape(document):
794 """Reverts the landscape environment (Landscape module) to TeX-code"""
796 if "landscape" not in document.get_module_list():
801 i = find_token(document.body, "\\begin_inset Flex Landscape", i + 1)
804 j = find_end_of_inset(document.body, i)
806 document.warning("Malformed LyX document: Can't find end of Landscape inset")
# The floating variant wraps the environment in \afterpage{...}, which
# requires the afterpage package.
809 if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
810 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
811 document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
812 add_to_preamble(document, ["\\usepackage{afterpage}"])
814 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
815 document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
817 add_to_preamble(document, ["\\usepackage{pdflscape}"])
818 document.del_module("landscape")
def convert_fontenc(document):
    """Convert default fontenc setting ("global" -> "auto")."""
    i = find_token(document.header, "\\fontencoding global", 0)
    # Guard: without it, a missing token (i == -1) would rewrite the
    # last header line instead of doing nothing.
    if i == -1:
        return
    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    """Revert default fontenc setting ("auto" -> "global")."""
    i = find_token(document.header, "\\fontencoding auto", 0)
    # Guard: without it, a missing token (i == -1) would rewrite the
    # last header line instead of doing nothing.
    if i == -1:
        return
    document.header[i] = document.header[i].replace("auto", "global")
# NOTE(review): sampled view -- the loop setup, termination guard and the
# deletion statement are elided; code is kept verbatim and only annotated.
# Strips every \nospellcheck font-info parameter from the body.
841 def revert_nospellcheck(document):
842 """Remove nospellcheck font info param"""
846 i = find_token(document.body, "\\nospellcheck", i)
# NOTE(review): sampled view -- loop setup, guards and deletions are
# elided; code is kept verbatim and only annotated.
# Removes 'class'/'document' float placement values from the header and
# from every Float inset.
852 def revert_floatpclass(document):
853 """Remove float placement params 'document' and 'class'"""
855 del_token(document.header, "\\float_placement class")
859 i = find_token(document.body, "\\begin_inset Float", i + 1)
862 j = find_end_of_inset(document.body, i)
863 k = find_token(document.body, "placement class", i, j)
865 k = find_token(document.body, "placement document", i, j)
# NOTE(review): sampled view -- loop setup, guards and several branches
# are elided; code is kept verbatim and only annotated.
# Removes float alignment parameters, emitting equivalent alignment ERT
# (\raggedright / \centering / \raggedleft) inside each Float inset.
872 def revert_floatalignment(document):
873 """Remove float alignment params"""
# Global default alignment; delete=True also strips the header line.
875 galignment = get_value(document.header, "\\float_alignment", delete=True)
879 i = find_token(document.body, "\\begin_inset Float", i + 1)
882 j = find_end_of_inset(document.body, i)
885 "Malformed LyX document: Can't find end of inset at line " + str(i)
888 k = find_token(document.body, "alignment", i, j)
892 alignment = get_value(document.body, "alignment", k)
# "document" means: inherit the global default.
893 if alignment == "document":
894 alignment = galignment
896 l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
898 document.warning("Can't find float layout!")
901 if alignment == "left":
902 alcmd = put_cmd_in_ert("\\raggedright{}")
903 elif alignment == "center":
904 alcmd = put_cmd_in_ert("\\centering{}")
905 elif alignment == "right":
906 alcmd = put_cmd_in_ert("\\raggedleft{}")
908 document.body[l + 1 : l + 1] = alcmd
909 # There might be subfloats, so we do not want to move past
910 # the end of the inset.
# NOTE(review): sampled view -- loop setup, guards and the command-name
# handling are elided; code is kept verbatim and only annotated.
# Rewrites citation insets as raw \cite-style ERT for the tufte classes,
# preserving the before/after texts as optional arguments.
914 def revert_tuftecite(document):
915 r"""Revert \cite commands in tufte classes"""
917 tufte = ["tufte-book", "tufte-handout"]
918 if document.textclass not in tufte:
923 i = find_token(document.body, "\\begin_inset CommandInset citation", i + 1)
926 j = find_end_of_inset(document.body, i)
928 document.warning("Can't find end of citation inset at line %d!!" % (i))
930 k = find_token(document.body, "LatexCommand", i, j)
932 document.warning("Can't find LatexCommand for citation inset at line %d!" % (i))
935 cmd = get_value(document.body, "LatexCommand", k)
939 pre = get_quoted_value(document.body, "before", i, j)
940 post = get_quoted_value(document.body, "after", i, j)
941 key = get_quoted_value(document.body, "key", i, j)
943 document.warning("Citation inset at line %d does not have a key!" % (i))
945 # Replace command with ERT
# Build "\cmd[pre][post]{key}" piecewise (optional args only if set).
948 res += "[" + pre + "]"
950 res += "[" + post + "]"
953 res += "{" + key + "}"
954 document.body[i : j + 1] = put_cmd_in_ert([res])
# NOTE(review): sampled view -- loop setup and guards are elided; code is
# kept verbatim and only annotated.
# Strips varwidth="true" from tabular column tags, downgrading
# tabularx/xltabular tables to plain tables.
958 def revert_stretchcolumn(document):
959 """We remove the column varwidth flags or everything else will become a mess."""
962 i = find_token(document.body, "\\begin_inset Tabular", i + 1)
965 j = find_end_of_inset(document.body, i + 1)
967 document.warning("Malformed LyX document: Could not find end of tabular.")
969 for k in range(i, j):
970 if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
971 document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
972 document.body[k] = document.body[k].replace(' varwidth="true"', "")
# NOTE(review): sampled view -- loop setup, guards and several statements
# are elided; code is kept verbatim and only annotated.
# Reverts "standard" table columns containing line breaks / multiple
# paragraphs to varwidth-based special columns plus ERT newlines,
# pulling in the array/varwidth packages when needed.
975 def revert_vcolumns(document):
976 """Revert standard columns with line breaks etc."""
982 i = find_token(document.body, "\\begin_inset Tabular", i + 1)
985 j = find_end_of_inset(document.body, i)
987 document.warning("Malformed LyX document: Could not find end of tabular.")
990 # Collect necessary column information
# Row/column counts come from the <lyxtabular ...> attribute string.
992 nrows = int(document.body[i + 1].split('"')[3])
993 ncols = int(document.body[i + 1].split('"')[5])
995 for k in range(ncols):
996 m = find_token(document.body, "<column", m)
997 width = get_option_value(document.body[m], "width")
998 varwidth = get_option_value(document.body[m], "varwidth")
999 alignment = get_option_value(document.body[m], "alignment")
1000 special = get_option_value(document.body[m], "special")
1001 col_info.append([width, varwidth, alignment, special, m])
# Scan every cell for content that plain columns cannot hold.
1006 for row in range(nrows):
1007 for col in range(ncols):
1008 m = find_token(document.body, "<cell", m)
1009 multicolumn = get_option_value(document.body[m], "multicolumn")
1010 multirow = get_option_value(document.body[m], "multirow")
1011 width = get_option_value(document.body[m], "width")
1012 rotate = get_option_value(document.body[m], "rotate")
1013 # Check for: linebreaks, multipars, non-standard environments
1015 endcell = find_token(document.body, "</cell>", begcell)
1018 find_token(document.body, "\\begin_inset Newline", begcell, endcell)
1022 elif count_pars_in_inset(document.body, begcell + 2) > 1:
1024 elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
1029 and ((multicolumn == "" and multirow == "") or width == "")
1032 col_info[col][0] == ""
1033 and col_info[col][1] == ""
1034 and col_info[col][3] == ""
1037 alignment = col_info[col][2]
1038 col_line = col_info[col][4]
1040 if alignment == "center":
1041 vval = ">{\\centering}"
1042 elif alignment == "left":
1043 vval = ">{\\raggedright}"
1044 elif alignment == "right":
1045 vval = ">{\\raggedleft}"
1048 vval += "V{\\linewidth}"
# Splice the special= attribute into the existing <column ...> tag.
1050 document.body[col_line] = (
1051 document.body[col_line][:-1] + ' special="' + vval + '">'
1053 # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
1054 # with newlines, and we do not want that)
1056 endcell = find_token(document.body, "</cell>", begcell)
1060 "\\begin_inset Newline newline",
1067 "\\begin_inset Newline linebreak",
1074 nle = find_end_of_inset(document.body, nl)
1075 del document.body[nle : nle + 1]
1077 document.body[nl : nl + 1] = put_cmd_in_ert("\\linebreak{}")
1079 document.body[nl : nl + 1] = put_cmd_in_ert("\\\\")
1085 if needarray == True:
1086 add_to_preamble(document, ["\\usepackage{array}"])
1087 if needvarwidth == True:
1088 add_to_preamble(document, ["\\usepackage{varwidth}"])
# NOTE(review): sampled view -- many encoding-table entries, loop setup
# and guards are elided; code is kept verbatim and only annotated.
# Reverts the per-bibliography encoding setting: biblatex gets a
# bibencoding biblio option, BibTeX insets get wrapped in
# \bgroup\inputencoding{...} ... \egroup ERT.
1091 def revert_bibencoding(document):
1092 """Revert bibliography encoding"""
1096 i = find_token(document.header, "\\cite_engine", 0)
1098 document.warning("Malformed document! Missing \\cite_engine")
1100 engine = get_value(document.header, "\\cite_engine", i)
1104 if engine in ["biblatex", "biblatex-natbib"]:
1107 # Map lyx to latex encoding names
1111 "armscii8": "armscii8",
1112 "iso8859-1": "latin1",
1113 "iso8859-2": "latin2",
1114 "iso8859-3": "latin3",
1115 "iso8859-4": "latin4",
1116 "iso8859-5": "iso88595",
1117 "iso8859-6": "8859-6",
1118 "iso8859-7": "iso-8859-7",
1119 "iso8859-8": "8859-8",
1120 "iso8859-9": "latin5",
1121 "iso8859-13": "latin7",
1122 "iso8859-15": "latin9",
1123 "iso8859-16": "latin10",
1124 "applemac": "applemac",
1126 "cp437de": "cp437de",
1143 "utf8-platex": "utf8",
1149 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i + 1)
1152 j = find_end_of_inset(document.body, i)
1154 document.warning("Can't find end of bibtex inset at line %d!!" % (i))
1156 encoding = get_quoted_value(document.body, "encoding", i, j)
1159 # remove encoding line
1160 k = find_token(document.body, "encoding", i, j)
1162 del document.body[k]
1163 if encoding == "default":
1165 # Re-find inset end line
1166 j = find_end_of_inset(document.body, i)
# biblatex path: fold the encoding into \biblio_options, creating the
# header line if it does not exist yet.
1169 h = find_token(document.header, "\\biblio_options", 0)
1171 biblio_options = get_value(document.header, "\\biblio_options", h)
1172 if "bibencoding" not in biblio_options:
1173 document.header[h] += ",bibencoding=%s" % encodings[encoding]
1175 bs = find_token(document.header, "\\biblatex_bibstyle", 0)
1177 # this should not happen
1179 "Malformed LyX document! No \\biblatex_bibstyle header found!"
1182 document.header[bs - 1 : bs - 1] = [
1183 "\\biblio_options bibencoding=" + encodings[encoding]
# BibTeX path: scope the encoding switch around the inset with ERT.
1186 document.body[j + 1 : j + 1] = put_cmd_in_ert("\\egroup")
1187 document.body[i:i] = put_cmd_in_ert(
1188 "\\bgroup\\inputencoding{" + encodings[encoding] + "}"
# NOTE(review): sampled view -- loop setup, guards and part of the types
# table are elided; code is kept verbatim and only annotated.
# Moves vcs-* arguments out of buffer Info insets into the dedicated
# "vcs" Info inset type.
1194 def convert_vcsinfo(document):
1195 """Separate vcs Info inset from buffer Info inset."""
# Maps the old buffer-inset argument to the new vcs-inset argument.
1198 "vcs-revision": "revision",
1199 "vcs-tree-revision": "tree-revision",
1200 "vcs-author": "author",
1206 i = find_token(document.body, "\\begin_inset Info", i + 1)
1209 j = find_end_of_inset(document.body, i + 1)
1211 document.warning("Malformed LyX document: Could not find end of Info inset.")
1213 tp = find_token(document.body, "type", i, j)
1214 tpv = get_quoted_value(document.body, "type", tp)
1217 arg = find_token(document.body, "arg", i, j)
1218 argv = get_quoted_value(document.body, "arg", arg)
1219 if argv not in list(types.keys()):
1221 document.body[tp] = 'type "vcs"'
1222 document.body[arg] = 'arg "' + types[argv] + '"'
# NOTE(review): sampled view -- loop setup and guards are elided; code is
# kept verbatim and only annotated.
# Folds "vcs" Info insets back into buffer Info insets with a vcs-
# prefixed argument.
1225 def revert_vcsinfo(document):
1226 """Merge vcs Info inset to buffer Info inset."""
1228 args = ["revision", "tree-revision", "author", "time", "date"]
1231 i = find_token(document.body, "\\begin_inset Info", i + 1)
1234 j = find_end_of_inset(document.body, i + 1)
1236 document.warning("Malformed LyX document: Could not find end of Info inset.")
1238 tp = find_token(document.body, "type", i, j)
1239 tpv = get_quoted_value(document.body, "type", tp)
1242 arg = find_token(document.body, "arg", i, j)
1243 argv = get_quoted_value(document.body, "arg", arg)
1244 if argv not in args:
1245 document.warning("Malformed Info inset. Invalid vcs arg.")
1247 document.body[tp] = 'type "buffer"'
1248 document.body[arg] = 'arg "vcs-' + argv + '"'
# NOTE(review): sampled view -- loop setup and guards are elided; code is
# kept verbatim and only annotated.
# Downgrades vcs "revision-abbrev" Info arguments to plain "revision".
1251 def revert_vcsinfo_rev_abbrev(document):
1252 "Convert abbreviated revisions to regular revisions."
1256 i = find_token(document.body, "\\begin_inset Info", i + 1)
1259 j = find_end_of_inset(document.body, i + 1)
1261 document.warning("Malformed LyX document: Could not find end of Info inset.")
1263 tp = find_token(document.body, "type", i, j)
1264 tpv = get_quoted_value(document.body, "type", tp)
1267 arg = find_token(document.body, "arg", i, j)
1268 argv = get_quoted_value(document.body, "arg", arg)
1269 if argv == "revision-abbrev":
1270 document.body[arg] = 'arg "revision"'
1273 def revert_dateinfo(document):
1274 """Revert date info insets to static text."""
1276 # FIXME This currently only considers the main language and uses the system locale
1277 # Ideally, it should honor context languages and switch the locale accordingly.
1279 # The date formats for each language using strftime syntax:
1280 # long, short, loclong, locmedium, locshort
1282 "afrikaans": ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1283 "albanian": ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1284 "american": ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1285 "amharic": ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1321 "australian": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1322 "austrian": ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1323 "bahasa": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1324 "bahasam": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1325 "basque": ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
1341 "%A, %d de %B de %Y",
1347 "breton": ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1348 "british": ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1356 "canadian": ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
1357 "canadien": ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1365 "chinese-simplified": [
1372 "chinese-traditional": [
1379 "coptic": ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1387 "czech": ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
1395 "divehi": ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
1396 "dutch": ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1397 "english": ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1405 "estonian": ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1406 "farsi": ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1407 "finnish": ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1408 "french": ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1410 "%A %d di %B dal %Y",
1417 "%A, %d de %B de %Y",
1423 "georgian": ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1424 "german": ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1439 "greek": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1440 "hebrew": ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1441 "hindi": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1456 "irish": ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1457 "italian": ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
1472 "kannada": ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
1473 "kazakh": ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
1474 "khmer": ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1482 "kurmanji": ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
1483 "lao": ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1484 "latin": ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1486 "%A, %Y. gada %d. %B",
1493 "%Y m. %B %d d., %A",
1506 "macedonian": ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1514 "malayalam": ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1515 "marathi": ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1517 "%A, %Y оны %m сарын %d",
1519 "%Y оны %m сарын %d",
1530 "newzealand": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1531 "ngerman": ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1532 "norsk": ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1533 "nynorsk": ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1534 "occitan": ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1542 "polish": ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1543 "polutonikogreek": [
1551 "%A, %d de %B de %Y",
1557 "romanian": ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1559 "%A, ils %d da %B %Y",
1579 "sanskrit": ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1580 "scottish": ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1595 "slovak": ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1604 "%A, %d de %B de %Y",
1611 "%A, %d de %B %de %Y",
1617 "swedish": ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1618 "syriac": ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1619 "tamil": ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1620 "telugu": ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1621 "thai": ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1623 "%Y %Bའི་ཚེས་%d, %A",
1629 "turkish": ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
1651 "urdu": ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
1659 "welsh": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1662 types = ["date", "fixdate", "moddate"]
1663 lang = get_value(document.header, "\\language")
1665 document.warning("Malformed LyX document! No \\language header found!")
1670 i = find_token(document.body, "\\begin_inset Info", i + 1)
1673 j = find_end_of_inset(document.body, i + 1)
1675 document.warning("Malformed LyX document: Could not find end of Info inset.")
1677 tp = find_token(document.body, "type", i, j)
1678 tpv = get_quoted_value(document.body, "type", tp)
1679 if tpv not in types:
1681 arg = find_token(document.body, "arg", i, j)
1682 argv = get_quoted_value(document.body, "arg", arg)
1685 if tpv == "fixdate":
1686 datecomps = argv.split("@")
1687 if len(datecomps) > 1:
1689 isodate = datecomps[1]
1690 m = re.search(r"(\d\d\d\d)-(\d\d)-(\d\d)", isodate)
1692 dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
1693 # FIXME if we had the path to the original document (not the one in the tmp dir),
1694 # we could use the mtime.
1695 # elif tpv == "moddate":
1696 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
1699 result = dte.isodate()
1700 elif argv == "long":
1701 result = dte.strftime(dateformats[lang][0])
1702 elif argv == "short":
1703 result = dte.strftime(dateformats[lang][1])
1704 elif argv == "loclong":
1705 result = dte.strftime(dateformats[lang][2])
1706 elif argv == "locmedium":
1707 result = dte.strftime(dateformats[lang][3])
1708 elif argv == "locshort":
1709 result = dte.strftime(dateformats[lang][4])
1712 argv.replace("MMMM", "%b")
1713 .replace("MMM", "%b")
1714 .replace("MM", "%m")
1717 fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
1718 fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
1719 fmt = re.sub("[^'%]d", "%d", fmt)
1720 fmt = fmt.replace("'", "")
1721 result = dte.strftime(fmt)
1722 document.body[i : j + 1] = [result]
1725 def revert_timeinfo(document):
1726 """Revert time info insets to static text."""
# Replaces Info insets of type time/fixtime/modtime with the literal time
# string they would display, using the per-language strftime patterns below.
1728 # FIXME This currently only considers the main language and uses the system locale
1729 # Ideally, it should honor context languages and switch the locale accordingly.
1730 # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
1733 # The time formats for each language using strftime syntax:
# Each entry maps a LyX language name to [long format, short format].
1736 "afrikaans": ["%H:%M:%S %Z", "%H:%M"],
1737 "albanian": ["%I:%M:%S %p, %Z", "%I:%M %p"],
1738 "american": ["%I:%M:%S %p %Z", "%I:%M %p"],
1739 "amharic": ["%I:%M:%S %p %Z", "%I:%M %p"],
1740 "ancientgreek": ["%H:%M:%S %Z", "%H:%M:%S"],
1741 "arabic_arabi": ["%I:%M:%S %p %Z", "%I:%M %p"],
1742 "arabic_arabtex": ["%I:%M:%S %p %Z", "%I:%M %p"],
1743 "armenian": ["%H:%M:%S %Z", "%H:%M"],
1744 "asturian": ["%H:%M:%S %Z", "%H:%M"],
1745 "australian": ["%I:%M:%S %p %Z", "%I:%M %p"],
1746 "austrian": ["%H:%M:%S %Z", "%H:%M"],
1747 "bahasa": ["%H.%M.%S %Z", "%H.%M"],
1748 "bahasam": ["%I:%M:%S %p %Z", "%I:%M %p"],
1749 "basque": ["%H:%M:%S (%Z)", "%H:%M"],
1750 "belarusian": ["%H:%M:%S, %Z", "%H:%M"],
1751 "bosnian": ["%H:%M:%S %Z", "%H:%M"],
1752 "brazilian": ["%H:%M:%S %Z", "%H:%M"],
1753 "breton": ["%H:%M:%S %Z", "%H:%M"],
1754 "british": ["%H:%M:%S %Z", "%H:%M"],
1755 "bulgarian": ["%H:%M:%S %Z", "%H:%M"],
1756 "canadian": ["%I:%M:%S %p %Z", "%I:%M %p"],
1757 "canadien": ["%H:%M:%S %Z", "%H h %M"],
1758 "catalan": ["%H:%M:%S %Z", "%H:%M"],
1759 "chinese-simplified": ["%Z %p%I:%M:%S", "%p%I:%M"],
1760 "chinese-traditional": ["%p%I:%M:%S [%Z]", "%p%I:%M"],
1761 "coptic": ["%H:%M:%S %Z", "%H:%M:%S"],
1762 "croatian": ["%H:%M:%S (%Z)", "%H:%M"],
1763 "czech": ["%H:%M:%S %Z", "%H:%M"],
1764 "danish": ["%H.%M.%S %Z", "%H.%M"],
1765 "divehi": ["%H:%M:%S %Z", "%H:%M"],
1766 "dutch": ["%H:%M:%S %Z", "%H:%M"],
1767 "english": ["%I:%M:%S %p %Z", "%I:%M %p"],
1768 "esperanto": ["%H:%M:%S %Z", "%H:%M:%S"],
1769 "estonian": ["%H:%M:%S %Z", "%H:%M"],
1770 "farsi": ["%H:%M:%S (%Z)", "%H:%M"],
1771 "finnish": ["%H.%M.%S %Z", "%H.%M"],
1772 "french": ["%H:%M:%S %Z", "%H:%M"],
1773 "friulan": ["%H:%M:%S %Z", "%H:%M"],
1774 "galician": ["%H:%M:%S %Z", "%H:%M"],
1775 "georgian": ["%H:%M:%S %Z", "%H:%M"],
1776 "german": ["%H:%M:%S %Z", "%H:%M"],
1777 "german-ch": ["%H:%M:%S %Z", "%H:%M"],
1778 "german-ch-old": ["%H:%M:%S %Z", "%H:%M"],
1779 "greek": ["%I:%M:%S %p %Z", "%I:%M %p"],
1780 "hebrew": ["%H:%M:%S %Z", "%H:%M"],
1781 "hindi": ["%I:%M:%S %p %Z", "%I:%M %p"],
1782 "icelandic": ["%H:%M:%S %Z", "%H:%M"],
1783 "interlingua": ["%H:%M:%S %Z", "%H:%M"],
1784 "irish": ["%H:%M:%S %Z", "%H:%M"],
1785 "italian": ["%H:%M:%S %Z", "%H:%M"],
1786 "japanese": ["%H時%M分%S秒 %Z", "%H:%M"],
1787 "japanese-cjk": ["%H時%M分%S秒 %Z", "%H:%M"],
1788 "kannada": ["%I:%M:%S %p %Z", "%I:%M %p"],
1789 "kazakh": ["%H:%M:%S %Z", "%H:%M"],
1790 "khmer": ["%I:%M:%S %p %Z", "%I:%M %p"],
1791 "korean": ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
1792 "kurmanji": ["%H:%M:%S %Z", "%H:%M:%S"],
1793 "lao": ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
1794 "latin": ["%H:%M:%S %Z", "%H:%M:%S"],
1795 "latvian": ["%H:%M:%S %Z", "%H:%M"],
1796 "lithuanian": ["%H:%M:%S %Z", "%H:%M"],
1797 "lowersorbian": ["%H:%M:%S %Z", "%H:%M"],
1798 "macedonian": ["%H:%M:%S %Z", "%H:%M"],
1799 "magyar": ["%H:%M:%S %Z", "%H:%M"],
1800 "malayalam": ["%p %I:%M:%S %Z", "%p %I:%M"],
1801 "marathi": ["%I:%M:%S %p %Z", "%I:%M %p"],
1802 "mongolian": ["%H:%M:%S %Z", "%H:%M"],
1803 "naustrian": ["%H:%M:%S %Z", "%H:%M"],
1804 "newzealand": ["%I:%M:%S %p %Z", "%I:%M %p"],
1805 "ngerman": ["%H:%M:%S %Z", "%H:%M"],
1806 "norsk": ["%H:%M:%S %Z", "%H:%M"],
1807 "nynorsk": ["kl. %H:%M:%S %Z", "%H:%M"],
1808 "occitan": ["%H:%M:%S %Z", "%H:%M"],
1809 "piedmontese": ["%H:%M:%S %Z", "%H:%M:%S"],
1810 "polish": ["%H:%M:%S %Z", "%H:%M"],
1811 "polutonikogreek": ["%I:%M:%S %p %Z", "%I:%M %p"],
1812 "portuguese": ["%H:%M:%S %Z", "%H:%M"],
1813 "romanian": ["%H:%M:%S %Z", "%H:%M"],
1814 "romansh": ["%H:%M:%S %Z", "%H:%M"],
1815 "russian": ["%H:%M:%S %Z", "%H:%M"],
1816 "samin": ["%H:%M:%S %Z", "%H:%M"],
1817 "sanskrit": ["%H:%M:%S %Z", "%H:%M"],
1818 "scottish": ["%H:%M:%S %Z", "%H:%M"],
1819 "serbian": ["%H:%M:%S %Z", "%H:%M"],
1820 "serbian-latin": ["%H:%M:%S %Z", "%H:%M"],
1821 "slovak": ["%H:%M:%S %Z", "%H:%M"],
1822 "slovene": ["%H:%M:%S %Z", "%H:%M"],
1823 "spanish": ["%H:%M:%S (%Z)", "%H:%M"],
1824 "spanish-mexico": ["%H:%M:%S %Z", "%H:%M"],
1825 "swedish": ["kl. %H:%M:%S %Z", "%H:%M"],
1826 "syriac": ["%H:%M:%S %Z", "%H:%M"],
1827 "tamil": ["%p %I:%M:%S %Z", "%p %I:%M"],
1828 "telugu": ["%I:%M:%S %p %Z", "%I:%M %p"],
1829 "thai": ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
1830 "tibetan": ["%I:%M:%S %p %Z", "%I:%M %p"],
1831 "turkish": ["%H:%M:%S %Z", "%H:%M"],
1832 "turkmen": ["%H:%M:%S %Z", "%H:%M"],
1833 "ukrainian": ["%H:%M:%S %Z", "%H:%M"],
1834 "uppersorbian": ["%H:%M:%S %Z", "%H:%M hodź."],
1835 "urdu": ["%I:%M:%S %p %Z", "%I:%M %p"],
1836 "vietnamese": ["%H:%M:%S %Z", "%H:%M"],
1837 "welsh": ["%H:%M:%S %Z", "%H:%M"],
# The three Info inset types handled by this revert routine.
1840 types = ["time", "fixtime", "modtime"]
1841 i = find_token(document.header, "\\language", 0)
1843 # this should not happen
1844 document.warning("Malformed LyX document! No \\language header found!")
1846 lang = get_value(document.header, "\\language", i)
# Scan the body for Info insets and replace the matching ones in place.
1850 i = find_token(document.body, "\\begin_inset Info", i + 1)
1853 j = find_end_of_inset(document.body, i + 1)
1855 document.warning("Malformed LyX document: Could not find end of Info inset.")
1857 tp = find_token(document.body, "type", i, j)
1858 tpv = get_quoted_value(document.body, "type", tp)
1859 if tpv not in types:
1861 arg = find_token(document.body, "arg", i, j)
1862 argv = get_quoted_value(document.body, "arg", arg)
# Default to the current (naive, local) time.
1864 dtme = datetime.now()
1866 if tpv == "fixtime":
# A "fixtime" argument carries the fixed time after an "@" separator.
1867 timecomps = argv.split("@")
1868 if len(timecomps) > 1:
1870 isotime = timecomps[1]
# Prefer HH:MM:SS; fall back to HH:MM below.
1871 m = re.search(r"(\d\d):(\d\d):(\d\d)", isotime)
1873 tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
1875 m = re.search(r"(\d\d):(\d\d)", isotime)
1877 tme = time(int(m.group(1)), int(m.group(2)))
1878 # FIXME if we had the path to the original document (not the one in the tmp dir),
1879 # we could use the mtime.
# NOTE(review): the commented-out branch below says "moddate" and uses
# date — it appears copy-pasted from revert_dateinfo; for this function it
# would presumably be "modtime"/time. Verify before enabling.
1880 # elif tpv == "moddate":
1881 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
1884 result = tme.isoformat()
1885 elif argv == "long":
1886 result = tme.strftime(timeformats[lang][0])
1887 elif argv == "short":
1888 result = tme.strftime(timeformats[lang][1])
# Otherwise argv is a custom Qt-style time format; translate its tokens
# (HH/hh, mm, ss, zzz/z, t, AP/ap) to strftime equivalents.
1891 argv.replace("HH", "%H")
1893 .replace("hh", "%I")
1897 fmt.replace("mm", "%M")
1899 .replace("ss", "%S")
1902 fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
1904 fmt.replace("AP", "%p")
1905 .replace("ap", "%p")
# Strip Qt's literal-text quoting characters.
1909 fmt = fmt.replace("'", "")
# NOTE(review): 'dte' looks undefined in this function ('dtme'/'tme' are
# the names bound above; 'dte' belongs to revert_dateinfo) — verify.
1910 result = dte.strftime(fmt)
# NOTE(review): assigning a plain string to a list slice splices it in
# character by character; revert_dateinfo wraps the result in a list
# ([result]) — confirm this is intended.
1911 document.body[i : j + 1] = result
1914 def revert_namenoextinfo(document):
1915 """Merge buffer Info inset type name-noext to name."""
# Walk all Info insets; any buffer-type inset whose argument is
# "name-noext" gets its argument rewritten to plain "name".
1919 i = find_token(document.body, "\\begin_inset Info", i + 1)
1922 j = find_end_of_inset(document.body, i + 1)
1924 document.warning("Malformed LyX document: Could not find end of Info inset.")
1926 tp = find_token(document.body, "type", i, j)
1927 tpv = get_quoted_value(document.body, "type", tp)
1930 arg = find_token(document.body, "arg", i, j)
1931 argv = get_quoted_value(document.body, "arg", arg)
1932 if argv != "name-noext":
# Rewrite the arg line in place; the rest of the inset is untouched.
1934 document.body[arg] = 'arg "name"'
1937 def revert_l7ninfo(document):
1938 """Revert l7n Info inset to text."""
# Replace each localization (l7n) Info inset with its argument string,
# cleaned of menu/accelerator markup.
1942 i = find_token(document.body, "\\begin_inset Info", i + 1)
1945 j = find_end_of_inset(document.body, i + 1)
1947 document.warning("Malformed LyX document: Could not find end of Info inset.")
1949 tp = find_token(document.body, "type", i, j)
1950 tpv = get_quoted_value(document.body, "type", tp)
1953 arg = find_token(document.body, "arg", i, j)
1954 argv = get_quoted_value(document.body, "arg", arg)
1955 # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
# The " & " sequence is protected with a placeholder, restored after the
# lone-"&" accelerator markers are stripped.
1959 .replace(" & ", "</amp;>")
1961 .replace("</amp;>", " & ")
# NOTE(review): argv is a string, so this slice assignment splices it in
# character by character rather than as one body line — verify intended.
1963 document.body[i : j + 1] = argv
1966 def revert_listpargs(document):
1967 """Reverts listpreamble arguments to TeX-code"""
# Each listpreamble Argument inset is removed and its plain-layout content
# re-emitted as an ERT inset at the start of the containing paragraph.
1970 i = find_token(document.body, "\\begin_inset Argument listpreamble:", i + 1)
1973 j = find_end_of_inset(document.body, i)
1974 # Find containing paragraph layout
1975 parent = get_containing_layout(document.body, i)
1977 document.warning("Malformed LyX document: Can't find parent paragraph layout")
# Extract the argument's contents before deleting the inset.
1980 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
1981 endPlain = find_end_of_layout(document.body, beginPlain)
1982 content = document.body[beginPlain + 1 : endPlain]
1983 del document.body[i : j + 1]
# Rebuild the content as an ERT inset inserted at the paragraph start.
1986 "\\begin_inset ERT",
1989 "\\begin_layout Plain Layout",
1993 + ["}", "\\end_layout", "", "\\end_inset", ""]
1995 document.body[parbeg:parbeg] = subst
1998 def revert_lformatinfo(document):
1999 """Revert layout format Info inset to text."""
# Replace lyxinfo/layoutformat Info insets with a literal format number.
2003 i = find_token(document.body, "\\begin_inset Info", i + 1)
2006 j = find_end_of_inset(document.body, i + 1)
2008 document.warning("Malformed LyX document: Could not find end of Info inset.")
2010 tp = find_token(document.body, "type", i, j)
2011 tpv = get_quoted_value(document.body, "type", tp)
2012 if tpv != "lyxinfo":
2014 arg = find_token(document.body, "arg", i, j)
2015 argv = get_quoted_value(document.body, "arg", arg)
2016 if argv != "layoutformat":
# "69" is presumably the layout format this revert targets — verify.
# NOTE(review): assigning the string "69" to a slice splices in the two
# characters "6" and "9" as separate body lines — confirm intended.
2019 document.body[i : j + 1] = "69"
2022 def convert_hebrew_parentheses(document):
2023 """Swap opening/closing parentheses in Hebrew text.
2025 Up to LyX 2.4, "(" was used as closing parenthesis and
2026 ")" as opening parenthesis for Hebrew in the LyX source.
# Track the language and layout context while walking the body so the swap
# is applied only to Hebrew text, skipping verbatim/pass-through regions.
2028 current_languages = [document.language]
2029 current_layouts = []
2031 # pass thru argument insets
2032 skip_layouts_arguments = {}
2033 skip_insets_arguments = {}
# Insets whose contents are passed through verbatim (never swapped).
2035 skip_insets = ["Formula", "ERT", "listings", "Flex URL"]
2036 # pass thru insets per document class
2037 if document.textclass in [
2039 "scrarticle-beamer",
# Beamer-family classes: many layout/inset arguments carry LaTeX code.
2043 skip_layouts_arguments.update(
2045 "Itemize": ["1", "item:2"],
2046 "Enumerate": ["1", "item:2"],
2047 "Description": ["1", "item:1"],
2051 "Subsection": ["1"],
2052 "Subsection*": ["1"],
2053 "Subsubsection": ["1"],
2054 "Subsubsection*": ["1"],
2055 "Frame": ["1", "2"],
2056 "AgainFrame": ["1", "2"],
2057 "PlainFrame": ["1", "2"],
2058 "FragileFrame": ["1", "2"],
2059 "FrameTitle": ["1"],
2060 "FrameSubtitle": ["1"],
2061 "Overprint": ["item:1"],
2065 "ExampleBlock": ["1"],
2066 "AlertBlock": ["1"],
2072 "Definition": ["1"],
2073 "Definitions": ["1"],
2083 skip_insets_arguments.update(
2086 "Flex Emphasize": ["1"],
2087 "Flex Alert": ["1"],
2088 "Flex Structure": ["1"],
2090 "Flex Uncover": ["1"],
2091 "Flex Visible": ["1"],
2092 "Flex Invisible": ["1"],
2093 "Flex Alternative": ["1"],
2094 "Flex Beamer Note": ["1"],
2097 elif document.textclass == "europecv":
2098 skip_layouts_arguments.update({"Picture": ["1"], "Item": ["1"], "MotherTongue": ["1"]})
2099 elif document.textclass in ["acmsiggraph", "acmsiggraph-0-92"]:
2100 skip_insets_arguments.update({"Flex CRcat": ["1", "2", "3"]})
2101 elif document.textclass in ["aastex", "aastex6", "aastex62"]:
2102 skip_layouts_arguments.update(
2104 "Altaffilation": ["1"],
2107 elif document.textclass == "jss":
2108 skip_insets.append("Flex Code Chunk")
2109 elif document.textclass == "moderncv":
2110 skip_layouts_arguments.update(
2112 "Photo": ["1", "2"],
2115 skip_insets_arguments.update({"Flex Column": ["1"]})
2116 elif document.textclass == "agutex":
2117 skip_layouts_arguments.update({"Author affiliation": ["1"]})
2118 elif document.textclass in ["ijmpd", "ijmpc"]:
2119 skip_layouts_arguments.update({"RomanList": ["1"]})
2120 elif document.textclass in ["jlreq-book", "jlreq-report", "jlreq-article"]:
2121 skip_insets.append("Flex Warichu*")
2122 # pass thru insets per module
2123 if "hpstatement" in document.get_module_list():
2124 skip_insets.append("Flex H-P number")
2125 if "tcolorbox" in document.get_module_list():
2126 skip_layouts_arguments.update({"New Color Box Type": ["3"]})
2127 if "sweave" in document.get_module_list():
2130 "Flex Sweave Options",
2131 "Flex S/R expression",
2132 "Flex Sweave Input File",
2136 if "knitr" in document.get_module_list():
2137 skip_insets.extend(["Flex Sweave Options", "Flex S/R expression", "Flex Chunk"])
2138 if "linguistics" in document.get_module_list():
2139 skip_layouts_arguments.update(
2141 "Numbered Example (multiline)": ["1"],
2142 "Numbered Examples (consecutive)": ["1"],
2143 "Subexample": ["1"],
2146 if "chessboard" in document.get_module_list():
2147 skip_insets.append("Flex Mainline")
2148 skip_layouts_arguments.update({"NewChessGame": ["1"]})
2149 skip_insets_arguments.update({"Flex ChessBoard": ["1"]})
2150 if "lilypond" in document.get_module_list():
2151 skip_insets.append("Flex LilyPond")
2152 if "noweb" in document.get_module_list():
2153 skip_insets.append("Flex Chunk")
2154 if "multicol" in document.get_module_list():
2155 skip_insets_arguments.update({"Flex Multiple Columns": ["1"]})
# Main scan: maintain language/layout/inset stacks and swap parentheses on
# Hebrew text lines only.
2157 inset_is_arg = False
2158 while i < len(document.body):
2159 line = document.body[i]
2160 if line.startswith("\\lang "):
2161 tokenend = len("\\lang ")
2162 lang = line[tokenend:].strip()
2163 current_languages[-1] = lang
2164 elif line.startswith("\\begin_layout "):
# A new layout inherits the current language until a \lang switch.
2165 current_languages.append(current_languages[-1])
2166 tokenend = len("\\begin_layout ")
2167 layout = line[tokenend:].strip()
2168 current_layouts.append(layout)
2169 elif line.startswith("\\end_layout"):
2170 current_languages.pop()
2171 current_layouts.pop()
2172 elif line.startswith("\\begin_inset Argument "):
2173 tokenend = len("\\begin_inset Argument ")
2174 Argument = line[tokenend:].strip()
2175 # all listpreamble:1 arguments are pass thru
2176 listpreamble = Argument == "listpreamble:1"
2177 layout_arg = current_layouts and Argument in skip_layouts_arguments.get(
2178 current_layouts[-1], []
2180 inset_arg = current_insets and Argument in skip_insets_arguments.get(
2181 current_insets[-1], []
2183 if layout_arg or inset_arg or listpreamble:
2184 # In these arguments, parentheses must not be changed
2185 i = find_end_of_inset(document.body, i) + 1
2189 elif line.startswith("\\begin_inset "):
2190 tokenend = len("\\begin_inset ")
2191 inset = line[tokenend:].strip()
2192 current_insets.append(inset)
2193 if inset in skip_insets:
2194 # In these insets, parentheses must not be changed
2195 i = find_end_of_inset(document.body, i)
2197 elif line.startswith("\\end_inset"):
2199 inset_is_arg = is_in_inset(document.body, i, "\\begin_inset Argument")[0] != -1
2201 current_insets.pop()
2202 elif current_languages[-1] == "hebrew" and not line.startswith("\\"):
# Swap "(" and ")" using a NUL byte as a temporary placeholder.
2203 document.body[i] = line.replace("(", "\x00").replace(")", "(").replace("\x00", ")")
2207 def revert_hebrew_parentheses(document):
2208 """Store parentheses in Hebrew text reversed"""
2209 # This only exists to keep the convert/revert naming convention
# The swap is an involution, so converting again reverts it.
2210 convert_hebrew_parentheses(document)
2213 def revert_malayalam(document):
2214 """Set the document language to English but assure Malayalam output"""
# Delegates to the shared language-revert helper with babel/polyglossia
# names for Malayalam.
2216 revert_language(document, "malayalam", "", "malayalam")
2219 def revert_soul(document):
2220 """Revert soul module flex insets to ERT"""
2222 flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]
# Load the soul package if any of the flex insets is present.
2225 i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
2227 add_to_preamble(document, ["\\usepackage{soul}"])
# \hl additionally needs the color package.
2229 i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
2231 add_to_preamble(document, ["\\usepackage{color}"])
# Replace each flex inset with the corresponding soul command in ERT.
2233 revert_flex_inset(document, "Spaceletters", "\\so")
2234 revert_flex_inset(document, "Strikethrough", "\\st")
2235 revert_flex_inset(document, "Underline", "\\ul")
2236 revert_flex_inset(document, "Highlight", "\\hl")
2237 revert_flex_inset(document, "Capitalize", "\\caps")
2240 def revert_tablestyle(document):
2241 """Remove tablestyle params"""
# Drop the \tablestyle header line entirely; older formats do not know it.
2243 i = find_token(document.header, "\\tablestyle")
2245 del document.header[i]
2248 def revert_bibfileencodings(document):
2249 """Revert individual Biblatex bibliography encodings"""
# Only relevant for biblatex engines: per-file encodings are moved from
# the bibtex inset into explicit \addbibresource preamble lines, and the
# inset itself is wrapped in a Note with an ERT \printbibliography.
2253 i = find_token(document.header, "\\cite_engine", 0)
2255 document.warning("Malformed document! Missing \\cite_engine")
2257 engine = get_value(document.header, "\\cite_engine", i)
2261 if engine in ["biblatex", "biblatex-natbib"]:
2264 # Map lyx to latex encoding names
2268 "armscii8": "armscii8",
2269 "iso8859-1": "latin1",
2270 "iso8859-2": "latin2",
2271 "iso8859-3": "latin3",
2272 "iso8859-4": "latin4",
2273 "iso8859-5": "iso88595",
2274 "iso8859-6": "8859-6",
2275 "iso8859-7": "iso-8859-7",
2276 "iso8859-8": "8859-8",
2277 "iso8859-9": "latin5",
2278 "iso8859-13": "latin7",
2279 "iso8859-15": "latin9",
2280 "iso8859-16": "latin10",
2281 "applemac": "applemac",
2283 "cp437de": "cp437de",
2300 "utf8-platex": "utf8",
# Process every bibtex CommandInset in the body.
2306 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i + 1)
2309 j = find_end_of_inset(document.body, i)
2311 document.warning("Can't find end of bibtex inset at line %d!!" % (i))
2313 encodings = get_quoted_value(document.body, "file_encodings", i, j)
2317 bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
2318 opts = get_quoted_value(document.body, "biblatexopts", i, j)
2319 if len(bibfiles) == 0:
2320 document.warning("Bibtex inset at line %d does not have a bibfile!" % (i))
2321 # remove encoding line
2322 k = find_token(document.body, "file_encodings", i, j)
2324 del document.body[k]
2325 # Re-find inset end line
2326 j = find_end_of_inset(document.body, i)
# file_encodings stores tab-separated "file encoding" pairs.
2328 enclist = encodings.split("\t")
2331 ppp = pp.split(" ", 1)
2332 encmap[ppp[0]] = ppp[1]
# Emit one \addbibresource per bibfile, with bibencoding where known.
2333 for bib in bibfiles:
2334 pr = "\\addbibresource"
2335 if bib in encmap.keys():
2336 pr += "[bibencoding=" + encmap[bib] + "]"
2337 pr += "{" + bib + "}"
2338 add_to_preamble(document, [pr])
2339 # Insert ERT \\printbibliography and wrap bibtex inset to a Note
2340 pcmd = "printbibliography"
2342 pcmd += "[" + opts + "]"
2344 "\\begin_inset ERT",
2347 "\\begin_layout Plain Layout",
2359 "\\begin_layout Standard",
2360 "\\begin_inset Note Note",
2363 "\\begin_layout Plain Layout",
# Keep the original inset inside the Note so nothing is lost.
2365 repl += document.body[i : j + 1]
2366 repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
2367 document.body[i : j + 1] = repl
2373 def revert_cmidruletrimming(document):
2374 """Remove \\cmidrule trimming"""
2376 # FIXME: Revert to TeX code?
2379 # first, let's find out if we need to do anything
2380 i = find_token(document.body, "<cell ", i + 1)
# Cheap substring test before paying for the regex substitution.
2383 j = document.body[i].find('trim="')
2386 rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
2387 # remove trim option
2388 document.body[i] = rgx.sub("", document.body[i])
# Fragment of the local InsetLayout definition for Flex:Ruby (raw-string
# list literal; used by convert_ruby_module/revert_ruby_module below).
2392 r"### Inserted by lyx2lyx (ruby inset) ###",
2393 r"InsetLayout Flex:Ruby",
2394 r"  LyxType       charstyle",
2395 r"  LatexType     command",
2399 r"  HTMLInnerTag  rb",
2400 r'  HTMLInnerAttr ""',
2402 r'  LabelString   "Ruby"',
2403 r"  Decoration    Conglomerate",
2405 r"    \ifdefined\kanjiskip",
2406 r"      \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}",
2407 r"    \else \ifdefined\luatexversion",
2408 r"      \usepackage{luatexja-ruby}",
2409 r"    \else \ifdefined\XeTeXversion",
2410 r"      \usepackage{ruby}%",
2412 r"      \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}",
2414 r"  Argument  post:1",
2415 r'    LabelString  "ruby text"',
2416 r'    MenuString  "Ruby Text|R"',
2417 r'    Tooltip    "Reading aid (ruby, furigana) for Chinese characters."',
2418 r"    Decoration  Conglomerate",
2431 def convert_ruby_module(document):
2432 """Use ruby module instead of local module definition"""
# Only add the module if the equivalent local layout was actually present.
2433 if document.del_local_layout(ruby_inset_def):
2434 document.add_module("ruby")
2437 def revert_ruby_module(document):
2438 """Replace ruby module with local module definition"""
# Only re-add the local layout if the module was actually in use.
2439 if document.del_module("ruby"):
2440 document.append_local_layout(ruby_inset_def)
2443 def convert_utf8_japanese(document):
2444 """Use generic utf8 with Japanese documents."""
2445 lang = get_value(document.header, "\\language")
2446 if not lang.startswith("japanese"):
2448 inputenc = get_value(document.header, "\\inputencoding")
# Collapse the Japanese-specific utf8 variants to plain utf8.
2449 if (lang == "japanese" and inputenc == "utf8-platex") or (
2450 lang == "japanese-cjk" and inputenc == "utf8-cjk"
2452 document.set_parameter("inputencoding", "utf8")
2455 def revert_utf8_japanese(document):
2456 """Use Japanese utf8 variants with Japanese documents."""
2457 inputenc = get_value(document.header, "\\inputencoding")
2458 if inputenc != "utf8":
2460 lang = get_value(document.header, "\\language")
# Restore the language-specific utf8 variant expected by older formats.
2461 if lang == "japanese":
2462 document.set_parameter("inputencoding", "utf8-platex")
2463 if lang == "japanese-cjk":
2464 document.set_parameter("inputencoding", "utf8-cjk")
2467 def revert_lineno(document):
2468 "Replace lineno setting with user-preamble code."
# Remove both header lines (delete=True) and, if lineno was enabled,
# re-create the equivalent preamble code.
2470 options = get_quoted_value(document.header, "\\lineno_options", delete=True)
2471 if not get_bool_value(document.header, "\\use_lineno", delete=True):
2474 options = "[" + options + "]"
2475 add_to_preamble(document, ["\\usepackage%s{lineno}" % options, "\\linenumbers"])
2478 def convert_lineno(document):
2479 "Replace user-preamble code with native lineno support."
# Look for the \linenumbers line produced by revert_lineno (and the
# \usepackage{lineno} line expected directly above it).
2482 i = find_token(document.preamble, "\\linenumbers", 1)
2484 usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i - 1])
2487 options = usepkg.group(1).strip("[]")
2488 del document.preamble[i - 1 : i + 1]
2490 del_token(document.preamble, "% Added by lyx2lyx", i - 2, i - 1)
# Insert the native header settings before the \index section.
# NOTE(review): 'use_lineno' is not bound anywhere visible here — verify
# where it is set before relying on this function in isolation.
2492 k = find_token(document.header, "\\index ")
2494 document.header[k:k] = ["\\use_lineno %d" % use_lineno]
2496 document.header[k:k] = [
2497 "\\use_lineno %d" % use_lineno,
2498 "\\lineno_options %s" % options,
2502 def convert_aaencoding(document):
2503 "Convert default document option due to encoding change in aa class."
2505 if document.textclass != "aa":
2508 i = find_token(document.header, "\\use_default_options true")
2511 val = get_value(document.header, "\\inputencoding")
2513 document.warning("Malformed LyX Document! Missing '\\inputencoding' header.")
# The aa class changed its default encoding: keep old behavior by
# switching off default options and passing latin9 explicitly.
2515 if val == "auto-legacy" or val == "latin9":
2516 document.header[i] = "\\use_default_options false"
2517 k = find_token(document.header, "\\options")
2519 document.header.insert(i, "\\options latin9")
2521 document.header[k] += ",latin9"
2524 def revert_aaencoding(document):
2525 "Revert default document option due to encoding change in aa class."
2527 if document.textclass != "aa":
2530 i = find_token(document.header, "\\use_default_options true")
2533 val = get_value(document.header, "\\inputencoding")
2535 document.warning("Malformed LyX Document! Missing \\inputencoding header.")
# Mirror of convert_aaencoding: force utf8 via explicit class options.
2538 document.header[i] = "\\use_default_options false"
2539 k = find_token(document.header, "\\options", 0)
2541 document.header.insert(i, "\\options utf8")
2543 document.header[k] = document.header[k] + ",utf8"
2546 def revert_new_languages(document):
2547 """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
2548 and Russian (Petrine orthography)."""
2550 # lyxname: (babelname, polyglossianame)
2552 "azerbaijani": ("azerbaijani", ""),
2553 "bengali": ("", "bengali"),
2554 "churchslavonic": ("", "churchslavonic"),
2555 "oldrussian": ("", "russian"),
2556 "korean": ("", "korean"),
# Collect every affected language actually used: the main language plus
# any \lang switches found in the body.
2558 if document.language in new_languages:
2559 used_languages = {document.language}
2561 used_languages = set()
2564 i = find_token(document.body, "\\lang", i + 1)
2567 val = get_value(document.body, "\\lang", i)
2568 if val in new_languages:
2569 used_languages.add(val)
2571 # Korean is already supported via CJK, so leave as-is for Babel
2572 if "korean" in used_languages and (
2573 not get_bool_value(document.header, "\\use_non_tex_fonts")
2574 or get_value(document.header, "\\language_package") == "babel"
2576 used_languages.discard("korean")
# Revert each language with its (babel, polyglossia) name pair.
2578 for lang in used_languages:
2579 revert_language(document, lang, *new_languages[lang])
# Fragment of the local InsetLayout definition for the deprecated
# Flex:Glosse inset (raw-string list literal; opening assignment is above).
2583 r"### Inserted by lyx2lyx (deprecated ling glosses) ###",
2584 r"InsetLayout Flex:Glosse",
2586 r'  LabelString   "Gloss (old version)"',
2587 r'  MenuString    "Gloss (old version)"',
2588 r"  LatexType     environment",
2589 r"  LatexName     linggloss",
2590 r"  Decoration    minimalistic",
2595 r"  CustomPars    false",
2596 r"  ForcePlain    true",
2597 r"  ParbreakIsNewline  true",
2598 r"  FreeSpacing   true",
2599 r"  Requires      covington",
2602 r"    \@ifundefined{linggloss}{%",
2603 r"    \newenvironment{linggloss}[2][]{",
2604 r"       \def\glosstr{\glt #1}%",
2606 r"    {\glosstr\glend}}{}",
2609 r"  ResetsFont true",
2611 r"    Decoration  conglomerate",
2612 r'    LabelString  "Translation"',
2613 r'    MenuString  "Glosse Translation|s"',
2614 r'    Tooltip    "Add a translation for the glosse"',
# Local InsetLayout definition for the deprecated Flex:Tri-Glosse inset
# (three-line gloss); companion to gloss_inset_def above.
2619 glosss_inset_def = [
2620 r"### Inserted by lyx2lyx (deprecated ling glosses) ###",
2621 r"InsetLayout Flex:Tri-Glosse",
2623 r'  LabelString   "Tri-Gloss (old version)"',
2624 r'  MenuString    "Tri-Gloss (old version)"',
2625 r"  LatexType     environment",
2626 r"  LatexName     lingglosss",
2627 r"  Decoration    minimalistic",
2632 r"  CustomPars    false",
2633 r"  ForcePlain    true",
2634 r"  ParbreakIsNewline  true",
2635 r"  FreeSpacing   true",
2637 r"  Requires      covington",
2640 r"    \@ifundefined{lingglosss}{%",
2641 r"    \newenvironment{lingglosss}[2][]{",
2642 r"       \def\glosstr{\glt #1}%",
2644 r"    {\glosstr\glend}}{}",
2646 r"  ResetsFont true",
2648 r"    Decoration  conglomerate",
2649 r'    LabelString  "Translation"',
2650 r'    MenuString  "Glosse Translation|s"',
2651 r'    Tooltip    "Add a translation for the glosse"',
2657 def convert_linggloss(document):
2658 "Move old ling glosses to local layout"
# Append the legacy layout definitions only for inset types actually used.
2659 if find_token(document.body, "\\begin_inset Flex Glosse", 0) != -1:
2660 document.append_local_layout(gloss_inset_def)
2661 if find_token(document.body, "\\begin_inset Flex Tri-Glosse", 0) != -1:
2662 document.append_local_layout(glosss_inset_def)
2665 def revert_linggloss(document):
2666 "Revert to old ling gloss definitions"
2667 if "linguistics" not in document.get_module_list():
2669 document.del_local_layout(gloss_inset_def)
2670 document.del_local_layout(glosss_inset_def)
2674 "\\begin_inset Flex Interlinear Gloss (2 Lines)",
2675 "\\begin_inset Flex Interlinear Gloss (3 Lines)",
2677 for glosse in glosses:
2680 i = find_token(document.body, glosse, i + 1)
2683 j = find_end_of_inset(document.body, i)
2685 document.warning("Malformed LyX document: Can't find end of Gloss inset")
2688 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2689 endarg = find_end_of_inset(document.body, arg)
2692 argbeginPlain = find_token(
2693 document.body, "\\begin_layout Plain Layout", arg, endarg
2695 if argbeginPlain == -1:
2696 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2698 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2699 optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
2701 # remove Arg insets and paragraph, if it only contains this inset
2703 document.body[arg - 1] == "\\begin_layout Plain Layout"
2704 and find_end_of_layout(document.body, arg - 1) == endarg + 3
2706 del document.body[arg - 1 : endarg + 4]
2708 del document.body[arg : endarg + 1]
2710 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2711 endarg = find_end_of_inset(document.body, arg)
2714 argbeginPlain = find_token(
2715 document.body, "\\begin_layout Plain Layout", arg, endarg
2717 if argbeginPlain == -1:
2718 document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
2720 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2721 marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2723 # remove Arg insets and paragraph, if it only contains this inset
2725 document.body[arg - 1] == "\\begin_layout Plain Layout"
2726 and find_end_of_layout(document.body, arg - 1) == endarg + 3
2728 del document.body[arg - 1 : endarg + 4]
2730 del document.body[arg : endarg + 1]
2732 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2733 endarg = find_end_of_inset(document.body, arg)
2736 argbeginPlain = find_token(
2737 document.body, "\\begin_layout Plain Layout", arg, endarg
2739 if argbeginPlain == -1:
2740 document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
2742 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2743 marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2745 # remove Arg insets and paragraph, if it only contains this inset
2747 document.body[arg - 1] == "\\begin_layout Plain Layout"
2748 and find_end_of_layout(document.body, arg - 1) == endarg + 3
2750 del document.body[arg - 1 : endarg + 4]
2752 del document.body[arg : endarg + 1]
2754 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2755 endarg = find_end_of_inset(document.body, arg)
2758 argbeginPlain = find_token(
2759 document.body, "\\begin_layout Plain Layout", arg, endarg
2761 if argbeginPlain == -1:
2762 document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
2764 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2765 marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2767 # remove Arg insets and paragraph, if it only contains this inset
2769 document.body[arg - 1] == "\\begin_layout Plain Layout"
2770 and find_end_of_layout(document.body, arg - 1) == endarg + 3
2772 del document.body[arg - 1 : endarg + 4]
2774 del document.body[arg : endarg + 1]
2777 if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
2780 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2781 endInset = find_end_of_inset(document.body, i)
2782 endPlain = find_end_of_layout(document.body, beginPlain)
2783 precontent = put_cmd_in_ert(cmd)
2784 if len(optargcontent) > 0:
2785 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
2786 precontent += put_cmd_in_ert("{")
2789 put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
2791 if cmd == "\\trigloss":
2792 postcontent += put_cmd_in_ert("}{") + marg3content
2793 postcontent += put_cmd_in_ert("}")
2795 document.body[endPlain : endInset + 1] = postcontent
2796 document.body[beginPlain + 1 : beginPlain] = precontent
2797 del document.body[i : beginPlain + 1]
2799 document.append_local_layout("Requires covington")
2804 def revert_subexarg(document):
2805 "Revert linguistic subexamples with argument to ERT"
2807 if "linguistics" not in document.get_module_list():
2813 i = find_token(document.body, "\\begin_layout Subexample", i + 1)
2816 j = find_end_of_layout(document.body, i)
2818 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2821 # check for consecutive layouts
2822 k = find_token(document.body, "\\begin_layout", j)
2823 if k == -1 or document.body[k] != "\\begin_layout Subexample":
2825 j = find_end_of_layout(document.body, k)
2827 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2830 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2834 endarg = find_end_of_inset(document.body, arg)
2836 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2837 if argbeginPlain == -1:
2838 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2840 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2841 optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
2843 # remove Arg insets and paragraph, if it only contains this inset
2845 document.body[arg - 1] == "\\begin_layout Plain Layout"
2846 and find_end_of_layout(document.body, arg - 1) == endarg + 3
2848 del document.body[arg - 1 : endarg + 4]
2850 del document.body[arg : endarg + 1]
2852 cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
2854 # re-find end of layout
2855 j = find_end_of_layout(document.body, i)
2857 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2860 # check for consecutive layouts
2861 k = find_token(document.body, "\\begin_layout", j)
2862 if k == -1 or document.body[k] != "\\begin_layout Subexample":
2864 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2865 j = find_end_of_layout(document.body, k)
2867 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2870 endev = put_cmd_in_ert("\\end{subexamples}")
2872 document.body[j:j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
2873 document.body[i : i + 1] = (
2874 ["\\begin_layout Standard"]
2876 + ["\\end_layout", "", "\\begin_layout Standard"]
2877 + put_cmd_in_ert("\\item ")
2880 document.append_local_layout("Requires covington")
2884 def revert_drs(document):
2885 "Revert DRS insets (linguistics) to ERT"
2887 if "linguistics" not in document.get_module_list():
2892 "\\begin_inset Flex DRS",
2893 "\\begin_inset Flex DRS*",
2894 "\\begin_inset Flex IfThen-DRS",
2895 "\\begin_inset Flex Cond-DRS",
2896 "\\begin_inset Flex QDRS",
2897 "\\begin_inset Flex NegDRS",
2898 "\\begin_inset Flex SDRS",
2903 i = find_token(document.body, drs, i + 1)
2906 j = find_end_of_inset(document.body, i)
2908 document.warning("Malformed LyX document: Can't find end of DRS inset")
2911 # Check for arguments
2912 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2913 endarg = find_end_of_inset(document.body, arg)
2916 argbeginPlain = find_token(
2917 document.body, "\\begin_layout Plain Layout", arg, endarg
2919 if argbeginPlain == -1:
2921 "Malformed LyX document: Can't find Argument 1 plain Layout"
2924 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2925 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2927 # remove Arg insets and paragraph, if it only contains this inset
2929 document.body[arg - 1] == "\\begin_layout Plain Layout"
2930 and find_end_of_layout(document.body, arg - 1) == endarg + 3
2932 del document.body[arg - 1 : endarg + 4]
2934 del document.body[arg : endarg + 1]
2937 j = find_end_of_inset(document.body, i)
2939 document.warning("Malformed LyX document: Can't find end of DRS inset")
2942 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2943 endarg = find_end_of_inset(document.body, arg)
2946 argbeginPlain = find_token(
2947 document.body, "\\begin_layout Plain Layout", arg, endarg
2949 if argbeginPlain == -1:
2951 "Malformed LyX document: Can't find Argument 2 plain Layout"
2954 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2955 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2957 # remove Arg insets and paragraph, if it only contains this inset
2959 document.body[arg - 1] == "\\begin_layout Plain Layout"
2960 and find_end_of_layout(document.body, arg - 1) == endarg + 3
2962 del document.body[arg - 1 : endarg + 4]
2964 del document.body[arg : endarg + 1]
2967 j = find_end_of_inset(document.body, i)
2969 document.warning("Malformed LyX document: Can't find end of DRS inset")
2972 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2973 endarg = find_end_of_inset(document.body, arg)
2974 postarg1content = []
2976 argbeginPlain = find_token(
2977 document.body, "\\begin_layout Plain Layout", arg, endarg
2979 if argbeginPlain == -1:
2981 "Malformed LyX document: Can't find Argument post:1 plain Layout"
2984 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2985 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2987 # remove Arg insets and paragraph, if it only contains this inset
2989 document.body[arg - 1] == "\\begin_layout Plain Layout"
2990 and find_end_of_layout(document.body, arg - 1) == endarg + 3
2992 del document.body[arg - 1 : endarg + 4]
2994 del document.body[arg : endarg + 1]
2997 j = find_end_of_inset(document.body, i)
2999 document.warning("Malformed LyX document: Can't find end of DRS inset")
3002 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
3003 endarg = find_end_of_inset(document.body, arg)
3004 postarg2content = []
3006 argbeginPlain = find_token(
3007 document.body, "\\begin_layout Plain Layout", arg, endarg
3009 if argbeginPlain == -1:
3011 "Malformed LyX document: Can't find Argument post:2 plain Layout"
3014 argendPlain = find_end_of_inset(document.body, argbeginPlain)
3015 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
3017 # remove Arg insets and paragraph, if it only contains this inset
3019 document.body[arg - 1] == "\\begin_layout Plain Layout"
3020 and find_end_of_layout(document.body, arg - 1) == endarg + 3
3022 del document.body[arg - 1 : endarg + 4]
3024 del document.body[arg : endarg + 1]
3027 j = find_end_of_inset(document.body, i)
3029 document.warning("Malformed LyX document: Can't find end of DRS inset")
3032 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
3033 endarg = find_end_of_inset(document.body, arg)
3034 postarg3content = []
3036 argbeginPlain = find_token(
3037 document.body, "\\begin_layout Plain Layout", arg, endarg
3039 if argbeginPlain == -1:
3041 "Malformed LyX document: Can't find Argument post:3 plain Layout"
3044 argendPlain = find_end_of_inset(document.body, argbeginPlain)
3045 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
3047 # remove Arg insets and paragraph, if it only contains this inset
3049 document.body[arg - 1] == "\\begin_layout Plain Layout"
3050 and find_end_of_layout(document.body, arg - 1) == endarg + 3
3052 del document.body[arg - 1 : endarg + 4]
3054 del document.body[arg : endarg + 1]
3057 j = find_end_of_inset(document.body, i)
3059 document.warning("Malformed LyX document: Can't find end of DRS inset")
3062 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
3063 endarg = find_end_of_inset(document.body, arg)
3064 postarg4content = []
3066 argbeginPlain = find_token(
3067 document.body, "\\begin_layout Plain Layout", arg, endarg
3069 if argbeginPlain == -1:
3071 "Malformed LyX document: Can't find Argument post:4 plain Layout"
3074 argendPlain = find_end_of_inset(document.body, argbeginPlain)
3075 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
3077 # remove Arg insets and paragraph, if it only contains this inset
3079 document.body[arg - 1] == "\\begin_layout Plain Layout"
3080 and find_end_of_layout(document.body, arg - 1) == endarg + 3
3082 del document.body[arg - 1 : endarg + 4]
3084 del document.body[arg : endarg + 1]
3086 # The respective LaTeX command
3088 if drs == "\\begin_inset Flex DRS*":
3090 elif drs == "\\begin_inset Flex IfThen-DRS":
3092 elif drs == "\\begin_inset Flex Cond-DRS":
3094 elif drs == "\\begin_inset Flex QDRS":
3096 elif drs == "\\begin_inset Flex NegDRS":
3098 elif drs == "\\begin_inset Flex SDRS":
3101 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
3102 endInset = find_end_of_inset(document.body, i)
3103 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
3104 precontent = put_cmd_in_ert(cmd)
3105 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
3106 if drs == "\\begin_inset Flex SDRS":
3107 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
3108 precontent += put_cmd_in_ert("{")
3111 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
3113 put_cmd_in_ert("}{")
3115 + put_cmd_in_ert("}{")
3117 + put_cmd_in_ert("}")
3119 if cmd == "\\condrs" or cmd == "\\qdrs":
3120 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
3122 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
3124 postcontent = put_cmd_in_ert("}")
3126 document.body[endPlain : endInset + 1] = postcontent
3127 document.body[beginPlain + 1 : beginPlain] = precontent
3128 del document.body[i : beginPlain + 1]
3130 document.append_local_layout("Provides covington 1")
3131 add_to_preamble(document, ["\\usepackage{drs,covington}"])
3136 def revert_babelfont(document):
3137 "Reverts the use of \\babelfont to user preamble"
3139 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
3142 i = find_token(document.header, "\\language_package", 0)
3144 document.warning("Malformed LyX document: Missing \\language_package.")
3146 if get_value(document.header, "\\language_package", 0) != "babel":
3149 # check font settings
3151 roman = sans = typew = "default"
3153 sf_scale = tt_scale = 100.0
3155 j = find_token(document.header, "\\font_roman", 0)
3157 document.warning("Malformed LyX document: Missing \\font_roman.")
3159 # We need to use this regex since split() does not handle quote protection
3160 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
3161 roman = romanfont[2].strip('"')
3162 romanfont[2] = '"default"'
3163 document.header[j] = " ".join(romanfont)
3165 j = find_token(document.header, "\\font_sans", 0)
3167 document.warning("Malformed LyX document: Missing \\font_sans.")
3169 # We need to use this regex since split() does not handle quote protection
3170 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
3171 sans = sansfont[2].strip('"')
3172 sansfont[2] = '"default"'
3173 document.header[j] = " ".join(sansfont)
3175 j = find_token(document.header, "\\font_typewriter", 0)
3177 document.warning("Malformed LyX document: Missing \\font_typewriter.")
3179 # We need to use this regex since split() does not handle quote protection
3180 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
3181 typew = ttfont[2].strip('"')
3182 ttfont[2] = '"default"'
3183 document.header[j] = " ".join(ttfont)
3185 i = find_token(document.header, "\\font_osf", 0)
3187 document.warning("Malformed LyX document: Missing \\font_osf.")
3189 osf = str2bool(get_value(document.header, "\\font_osf", i))
3191 j = find_token(document.header, "\\font_sf_scale", 0)
3193 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
3195 sfscale = document.header[j].split()
3198 document.header[j] = " ".join(sfscale)
3201 sf_scale = float(val)
3203 document.warning("Invalid font_sf_scale value: " + val)
3205 j = find_token(document.header, "\\font_tt_scale", 0)
3207 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
3209 ttscale = document.header[j].split()
3212 document.header[j] = " ".join(ttscale)
3215 tt_scale = float(val)
3217 document.warning("Invalid font_tt_scale value: " + val)
3219 # set preamble stuff
3220 pretext = ["%% This document must be processed with xelatex or lualatex!"]
3221 pretext.append("\\AtBeginDocument{%")
3222 if roman != "default":
3223 pretext.append("\\babelfont{rm}[Mapping=tex-text]{" + roman + "}")
3224 if sans != "default":
3225 sf = "\\babelfont{sf}["
3226 if sf_scale != 100.0:
3227 sf += "Scale=" + str(sf_scale / 100.0) + ","
3228 sf += "Mapping=tex-text]{" + sans + "}"
3230 if typew != "default":
3231 tw = "\\babelfont{tt}"
3232 if tt_scale != 100.0:
3233 tw += "[Scale=" + str(tt_scale / 100.0) + "]"
3234 tw += "{" + typew + "}"
3237 pretext.append("\\defaultfontfeatures{Numbers=OldStyle}")
3239 insert_to_preamble(document, pretext)
3242 def revert_minionpro(document):
3243 "Revert native MinionPro font definition (with extra options) to LaTeX"
3245 if get_bool_value(document.header, "\\use_non_tex_fonts"):
3248 regexp = re.compile(r"(\\font_roman_opts)")
3249 x = find_re(document.header, regexp, 0)
3253 # We need to use this regex since split() does not handle quote protection
3254 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3255 opts = romanopts[1].strip('"')
3257 i = find_token(document.header, "\\font_roman", 0)
3259 document.warning("Malformed LyX document: Missing \\font_roman.")
3262 # We need to use this regex since split() does not handle quote protection
3263 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3264 roman = romanfont[1].strip('"')
3265 if roman != "minionpro":
3267 romanfont[1] = '"default"'
3268 document.header[i] = " ".join(romanfont)
3270 j = find_token(document.header, "\\font_osf true", 0)
3273 preamble = "\\usepackage["
3275 document.header[j] = "\\font_osf false"
3279 preamble += "]{MinionPro}"
3280 add_to_preamble(document, [preamble])
3281 del document.header[x]
3284 def revert_font_opts(document):
3285 "revert font options by outputting \\setxxxfont or \\babelfont to the preamble"
3287 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
3288 Babel = get_value(document.header, "\\language_package") == "babel"
3291 regexp = re.compile(r"(\\font_roman_opts)")
3292 i = find_re(document.header, regexp, 0)
3294 # We need to use this regex since split() does not handle quote protection
3295 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3296 opts = romanopts[1].strip('"')
3297 del document.header[i]
3299 regexp = re.compile(r"(\\font_roman)")
3300 i = find_re(document.header, regexp, 0)
3302 # We need to use this regex since split() does not handle quote protection
3303 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3304 font = romanfont[2].strip('"')
3305 romanfont[2] = '"default"'
3306 document.header[i] = " ".join(romanfont)
3307 if font != "default":
3309 preamble = "\\babelfont{rm}["
3311 preamble = "\\setmainfont["
3314 preamble += "Mapping=tex-text]{"
3317 add_to_preamble(document, [preamble])
3320 regexp = re.compile(r"(\\font_sans_opts)")
3321 i = find_re(document.header, regexp, 0)
3324 # We need to use this regex since split() does not handle quote protection
3325 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3326 opts = sfopts[1].strip('"')
3327 del document.header[i]
3329 regexp = re.compile(r"(\\font_sf_scale)")
3330 i = find_re(document.header, regexp, 0)
3332 scaleval = get_value(document.header, "\\font_sf_scale", i).split()[1]
3333 regexp = re.compile(r"(\\font_sans)")
3334 i = find_re(document.header, regexp, 0)
3336 # We need to use this regex since split() does not handle quote protection
3337 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3338 font = sffont[2].strip('"')
3339 sffont[2] = '"default"'
3340 document.header[i] = " ".join(sffont)
3341 if font != "default":
3343 preamble = "\\babelfont{sf}["
3345 preamble = "\\setsansfont["
3349 preamble += "Scale=0."
3350 preamble += scaleval
3352 preamble += "Mapping=tex-text]{"
3355 add_to_preamble(document, [preamble])
3358 regexp = re.compile(r"(\\font_typewriter_opts)")
3359 i = find_re(document.header, regexp, 0)
3362 # We need to use this regex since split() does not handle quote protection
3363 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3364 opts = ttopts[1].strip('"')
3365 del document.header[i]
3367 regexp = re.compile(r"(\\font_tt_scale)")
3368 i = find_re(document.header, regexp, 0)
3370 scaleval = get_value(document.header, "\\font_tt_scale", i).split()[1]
3371 regexp = re.compile(r"(\\font_typewriter)")
3372 i = find_re(document.header, regexp, 0)
3374 # We need to use this regex since split() does not handle quote protection
3375 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3376 font = ttfont[2].strip('"')
3377 ttfont[2] = '"default"'
3378 document.header[i] = " ".join(ttfont)
3379 if font != "default":
3381 preamble = "\\babelfont{tt}["
3383 preamble = "\\setmonofont["
3387 preamble += "Scale=0."
3388 preamble += scaleval
3390 preamble += "Mapping=tex-text]{"
3393 add_to_preamble(document, [preamble])
3396 def revert_plainNotoFonts_xopts(document):
3397 "Revert native (straight) Noto font definition (with extra options) to LaTeX"
3399 if get_bool_value(document.header, "\\use_non_tex_fonts"):
3403 y = find_token(document.header, "\\font_osf true", 0)
3407 regexp = re.compile(r"(\\font_roman_opts)")
3408 x = find_re(document.header, regexp, 0)
3409 if x == -1 and not osf:
3414 # We need to use this regex since split() does not handle quote protection
3415 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3416 opts = romanopts[1].strip('"')
3422 i = find_token(document.header, "\\font_roman", 0)
3426 # We need to use this regex since split() does not handle quote protection
3427 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3428 roman = romanfont[1].strip('"')
3429 if roman != "NotoSerif-TLF":
3432 j = find_token(document.header, "\\font_sans", 0)
3436 # We need to use this regex since split() does not handle quote protection
3437 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
3438 sf = sffont[1].strip('"')
3442 j = find_token(document.header, "\\font_typewriter", 0)
3446 # We need to use this regex since split() does not handle quote protection
3447 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
3448 tt = ttfont[1].strip('"')
3452 # So we have noto as "complete font"
3453 romanfont[1] = '"default"'
3454 document.header[i] = " ".join(romanfont)
3456 preamble = "\\usepackage["
3458 preamble += "]{noto}"
3459 add_to_preamble(document, [preamble])
3461 document.header[y] = "\\font_osf false"
3463 del document.header[x]
def revert_notoFonts_xopts(document):
    """Revert native (extended) Noto font definition (with extra options) to LaTeX.

    Only applies when the document uses TeX fonts; the collected
    package/options pairs are written to the user preamble.
    """
    # Non-TeX (OpenType) font setups are handled via fontspec, not via the
    # noto LaTeX packages, so there is nothing to revert.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    # fontmap collects package -> option-list entries; it must be initialized
    # before revert_fonts() fills it and add_preamble_fonts() reads it
    # (the visible code referenced it without ever assigning it).
    fontmap = dict()
    fm = createFontMapping(["Noto"])
    if revert_fonts(document, fm, fontmap, True):
        add_preamble_fonts(document, fontmap)
def revert_IBMFonts_xopts(document):
    """Revert native IBM font definition (with extra options) to LaTeX.

    Only applies when the document uses TeX fonts; the collected
    package/options pairs are written to the user preamble.
    """
    # Non-TeX (OpenType) font setups are handled via fontspec; nothing to do.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    # fontmap collects package -> option-list entries; it must be initialized
    # before revert_fonts() fills it and add_preamble_fonts() reads it
    # (the visible code referenced it without ever assigning it).
    fontmap = dict()
    fm = createFontMapping(["IBM"])
    if revert_fonts(document, fm, fontmap, True):
        add_preamble_fonts(document, fontmap)
def revert_AdobeFonts_xopts(document):
    """Revert native Adobe font definition (with extra options) to LaTeX.

    Only applies when the document uses TeX fonts; the collected
    package/options pairs are written to the user preamble.
    """
    # Non-TeX (OpenType) font setups are handled via fontspec; nothing to do.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    # fontmap collects package -> option-list entries; it must be initialized
    # before revert_fonts() fills it and add_preamble_fonts() reads it
    # (the visible code referenced it without ever assigning it).
    fontmap = dict()
    fm = createFontMapping(["Adobe"])
    if revert_fonts(document, fm, fontmap, True):
        add_preamble_fonts(document, fontmap)
3502 def convert_osf(document):
3503 "Convert \\font_osf param to new format"
3505 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
3507 i = find_token(document.header, "\\font_osf", 0)
3509 document.warning("Malformed LyX document: Missing \\font_osf.")
3514 "ADOBESourceSansPro",
3519 "NotoSansExtralight",
3521 osftt = ["ADOBESourceCodePro", "NotoMonoRegular"]
3523 osfval = str2bool(get_value(document.header, "\\font_osf", i))
3524 document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
3527 document.header.insert(i, "\\font_sans_osf false")
3528 document.header.insert(i + 1, "\\font_typewriter_osf false")
3532 x = find_token(document.header, "\\font_sans", 0)
3534 document.warning("Malformed LyX document: Missing \\font_sans.")
3536 # We need to use this regex since split() does not handle quote protection
3537 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3538 sf = sffont[1].strip('"')
3540 document.header.insert(i, "\\font_sans_osf true")
3542 document.header.insert(i, "\\font_sans_osf false")
3544 x = find_token(document.header, "\\font_typewriter", 0)
3546 document.warning("Malformed LyX document: Missing \\font_typewriter.")
3548 # We need to use this regex since split() does not handle quote protection
3549 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3550 tt = ttfont[1].strip('"')
3552 document.header.insert(i + 1, "\\font_typewriter_osf true")
3554 document.header.insert(i + 1, "\\font_typewriter_osf false")
3557 document.header.insert(i, "\\font_sans_osf false")
3558 document.header.insert(i + 1, "\\font_typewriter_osf false")
3561 def revert_osf(document):
3562 "Revert \\font_*_osf params"
3564 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
3566 i = find_token(document.header, "\\font_roman_osf", 0)
3568 document.warning("Malformed LyX document: Missing \\font_roman_osf.")
3571 osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
3572 document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
3574 i = find_token(document.header, "\\font_sans_osf", 0)
3576 document.warning("Malformed LyX document: Missing \\font_sans_osf.")
3579 osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
3580 del document.header[i]
3582 i = find_token(document.header, "\\font_typewriter_osf", 0)
3584 document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
3587 osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
3588 del document.header[i]
3591 i = find_token(document.header, "\\font_osf", 0)
3593 document.warning("Malformed LyX document: Missing \\font_osf.")
3595 document.header[i] = "\\font_osf true"
3598 def revert_texfontopts(document):
3599 "Revert native TeX font definitions (with extra options) to LaTeX"
3601 if get_bool_value(document.header, "\\use_non_tex_fonts"):
3616 # First the sf (biolinum only)
3617 regexp = re.compile(r"(\\font_sans_opts)")
3618 x = find_re(document.header, regexp, 0)
3620 # We need to use this regex since split() does not handle quote protection
3621 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3622 opts = sfopts[1].strip('"')
3623 i = find_token(document.header, "\\font_sans", 0)
3625 document.warning("Malformed LyX document: Missing \\font_sans.")
3627 # We need to use this regex since split() does not handle quote protection
3628 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3629 sans = sffont[1].strip('"')
3630 if sans == "biolinum":
3632 sffont[1] = '"default"'
3633 document.header[i] = " ".join(sffont)
3635 j = find_token(document.header, "\\font_sans_osf true", 0)
3638 k = find_token(document.header, "\\font_sf_scale", 0)
3640 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
3642 sfscale = document.header[k].split()
3645 document.header[k] = " ".join(sfscale)
3648 sf_scale = float(val)
3650 document.warning("Invalid font_sf_scale value: " + val)
3651 preamble = "\\usepackage["
3653 document.header[j] = "\\font_sans_osf false"
3655 if sf_scale != 100.0:
3656 preamble += "scaled=" + str(sf_scale / 100.0) + ","
3658 preamble += "]{biolinum}"
3659 add_to_preamble(document, [preamble])
3660 del document.header[x]
3662 regexp = re.compile(r"(\\font_roman_opts)")
3663 x = find_re(document.header, regexp, 0)
3667 # We need to use this regex since split() does not handle quote protection
3668 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3669 opts = romanopts[1].strip('"')
3671 i = find_token(document.header, "\\font_roman", 0)
3673 document.warning("Malformed LyX document: Missing \\font_roman.")
3676 # We need to use this regex since split() does not handle quote protection
3677 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3678 roman = romanfont[1].strip('"')
3679 if roman not in rmfonts:
3681 romanfont[1] = '"default"'
3682 document.header[i] = " ".join(romanfont)
3684 if roman == "utopia":
3686 elif roman == "palatino":
3687 package = "mathpazo"
3688 elif roman == "times":
3689 package = "mathptmx"
3690 elif roman == "xcharter":
3691 package = "XCharter"
3693 j = find_token(document.header, "\\font_roman_osf true", 0)
3695 if roman == "cochineal":
3696 osf = "proportional,osf,"
3697 elif roman == "utopia":
3699 elif roman == "garamondx":
3701 elif roman == "libertine":
3703 elif roman == "palatino":
3705 elif roman == "xcharter":
3707 document.header[j] = "\\font_roman_osf false"
3708 k = find_token(document.header, "\\font_sc true", 0)
3710 if roman == "utopia":
3712 if roman == "palatino" and osf == "":
3714 document.header[k] = "\\font_sc false"
3715 preamble = "\\usepackage["
3718 preamble += "]{" + package + "}"
3719 add_to_preamble(document, [preamble])
3720 del document.header[x]
def convert_CantarellFont(document):
    """Convert a LaTeX-package based Cantarell font setup to the native format."""
    # Only relevant for TeX fonts; OpenType setups use fontspec names instead.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(["Cantarell"])
    convert_fonts(document, mapping, "oldstyle")
def revert_CantarellFont(document):
    """Revert native Cantarell font definition to LaTeX packages.

    With TeX fonts in use, replace the Cantarell header setting by the
    corresponding \\usepackage lines in the user preamble.
    """
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # fontmap collects package -> option-list entries filled by
        # revert_fonts() and flushed to the preamble afterwards; it must be
        # initialized here (the visible code used it without assignment).
        fontmap = dict()
        fm = createFontMapping(["Cantarell"])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def convert_ChivoFont(document):
    """Convert a LaTeX-package based Chivo font setup to the native format."""
    # Only relevant for TeX fonts; OpenType setups use fontspec names instead.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(["Chivo"])
    convert_fonts(document, mapping, "oldstyle")
def revert_ChivoFont(document):
    """Revert native Chivo font definition to LaTeX packages.

    With TeX fonts in use, replace the Chivo header setting by the
    corresponding \\usepackage lines in the user preamble.
    """
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # fontmap collects package -> option-list entries filled by
        # revert_fonts() and flushed to the preamble afterwards; it must be
        # initialized here (the visible code used it without assignment).
        fontmap = dict()
        fm = createFontMapping(["Chivo"])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def convert_FiraFont(document):
    """Convert a LaTeX-package based Fira font setup to the native format."""
    # Only relevant for TeX fonts; OpenType setups use fontspec names instead.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(["Fira"])
    convert_fonts(document, mapping, "lf")
def revert_FiraFont(document):
    """Revert native Fira font definition to LaTeX packages.

    With TeX fonts in use, replace the Fira header setting by the
    corresponding \\usepackage lines in the user preamble.
    """
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # fontmap collects package -> option-list entries filled by
        # revert_fonts() and flushed to the preamble afterwards; it must be
        # initialized here (the visible code used it without assignment).
        fontmap = dict()
        fm = createFontMapping(["Fira"])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
3777 def convert_Semibolds(document):
3778 "Move semibold options to extraopts"
3780 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
3782 i = find_token(document.header, "\\font_roman", 0)
3784 document.warning("Malformed LyX document: Missing \\font_roman.")
3786 # We need to use this regex since split() does not handle quote protection
3787 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3788 roman = romanfont[1].strip('"')
3789 if roman == "IBMPlexSerifSemibold":
3790 romanfont[1] = '"IBMPlexSerif"'
3791 document.header[i] = " ".join(romanfont)
3793 if NonTeXFonts == False:
3794 regexp = re.compile(r"(\\font_roman_opts)")
3795 x = find_re(document.header, regexp, 0)
3797 # Sensible place to insert tag
3798 fo = find_token(document.header, "\\font_sf_scale")
3800 document.warning("Malformed LyX document! Missing \\font_sf_scale")
3802 document.header.insert(fo, '\\font_roman_opts "semibold"')
3804 # We need to use this regex since split() does not handle quote protection
3805 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3806 document.header[x] = (
3807 '\\font_roman_opts "semibold, ' + romanopts[1].strip('"') + '"'
3810 i = find_token(document.header, "\\font_sans", 0)
3812 document.warning("Malformed LyX document: Missing \\font_sans.")
3814 # We need to use this regex since split() does not handle quote protection
3815 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3816 sf = sffont[1].strip('"')
3817 if sf == "IBMPlexSansSemibold":
3818 sffont[1] = '"IBMPlexSans"'
3819 document.header[i] = " ".join(sffont)
3821 if NonTeXFonts == False:
3822 regexp = re.compile(r"(\\font_sans_opts)")
3823 x = find_re(document.header, regexp, 0)
3825 # Sensible place to insert tag
3826 fo = find_token(document.header, "\\font_sf_scale")
3828 document.warning("Malformed LyX document! Missing \\font_sf_scale")
3830 document.header.insert(fo, '\\font_sans_opts "semibold"')
3832 # We need to use this regex since split() does not handle quote protection
3833 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3834 document.header[x] = (
3835 '\\font_sans_opts "semibold, ' + sfopts[1].strip('"') + '"'
3838 i = find_token(document.header, "\\font_typewriter", 0)
3840 document.warning("Malformed LyX document: Missing \\font_typewriter.")
3842 # We need to use this regex since split() does not handle quote protection
3843 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3844 tt = ttfont[1].strip('"')
3845 if tt == "IBMPlexMonoSemibold":
3846 ttfont[1] = '"IBMPlexMono"'
3847 document.header[i] = " ".join(ttfont)
3849 if NonTeXFonts == False:
3850 regexp = re.compile(r"(\\font_typewriter_opts)")
3851 x = find_re(document.header, regexp, 0)
3853 # Sensible place to insert tag
3854 fo = find_token(document.header, "\\font_tt_scale")
3856 document.warning("Malformed LyX document! Missing \\font_tt_scale")
3858 document.header.insert(fo, '\\font_typewriter_opts "semibold"')
3860 # We need to use this regex since split() does not handle quote protection
3861 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3862 document.header[x] = (
3863 '\\font_typewriter_opts "semibold, ' + ttopts[1].strip('"') + '"'
# Rewrites the roman, sans and typewriter font names in the document header,
# merging the Noto "-TLF" variants into the corresponding "*Regular" names
# (NotoSerif-TLF -> NotoSerifRegular, etc.).
# NOTE(review): this chunk was extracted with interior lines dropped (gaps in
# the embedded original line numbers, e.g. the `if i == -1:` guards before
# each warning) and with indentation flattened; code kept verbatim.
3867 def convert_NotoRegulars(document):
3868 "Merge diverse Noto regular fonts"
3870 i = find_token(document.header, "\\font_roman", 0)
3872 document.warning("Malformed LyX document: Missing \\font_roman.")
3874 # We need to use this regex since split() does not handle quote protection
3875 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3876 roman = romanfont[1].strip('"')
3877 if roman == "NotoSerif-TLF":
3878 romanfont[1] = '"NotoSerifRegular"'
3879 document.header[i] = " ".join(romanfont)
3881 i = find_token(document.header, "\\font_sans", 0)
3883 document.warning("Malformed LyX document: Missing \\font_sans.")
3885 # We need to use this regex since split() does not handle quote protection
3886 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3887 sf = sffont[1].strip('"')
3888 if sf == "NotoSans-TLF":
3889 sffont[1] = '"NotoSansRegular"'
3890 document.header[i] = " ".join(sffont)
3892 i = find_token(document.header, "\\font_typewriter", 0)
3894 document.warning("Malformed LyX document: Missing \\font_typewriter.")
3896 # We need to use this regex since split() does not handle quote protection
3897 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3898 tt = ttfont[1].strip('"')
3899 if tt == "NotoMono-TLF":
3900 ttfont[1] = '"NotoMonoRegular"'
3901 document.header[i] = " ".join(ttfont)
# Converts a TeX-fonts CrimsonPro setup to the native font header entries,
# using the shared font-mapping machinery ("lf" = lining-figures variant).
# NOTE(review): extraction dropped blank/guard lines and indentation;
# code kept verbatim.
3904 def convert_CrimsonProFont(document):
3905 "Handle CrimsonPro font definition to LaTeX"
3907 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
3908 fm = createFontMapping(["CrimsonPro"])
3909 convert_fonts(document, fm, "lf")
# Reverts the native CrimsonPro font header entries back to LaTeX preamble
# code, collecting the required packages into `fontmap`.
# NOTE(review): the line initializing `fontmap` (original line 3916, likely
# `fontmap = dict()`) was dropped by the extraction — confirm against the
# full source. Code kept verbatim.
3912 def revert_CrimsonProFont(document):
3913 "Revert native CrimsonPro font definition to LaTeX"
3915 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
3917 fm = createFontMapping(["CrimsonPro"])
3918 if revert_fonts(document, fm, fontmap, False, True):
3919 add_preamble_fonts(document, fontmap)
# Reverts the 2.4 native page-size support for memoir and KOMA-script
# ("scr*") classes: resets \papersize to default and moves the old value
# into the \options class-options header line instead.
# NOTE(review): `document.textclass[:2] != "scr"` can never match the
# three-character prefix "scr" (the sibling convert_pagesizes uses `[:3]`) —
# looks like an off-by-one in the slice; confirm against the full source.
# Extraction also dropped interior lines (guards, early returns) and
# indentation; code kept verbatim.
3922 def revert_pagesizes(document):
3923 "Revert new page sizes in memoir and KOMA to options"
3925 if document.textclass != "memoir" and document.textclass[:2] != "scr":
3928 i = find_token(document.header, "\\use_geometry true", 0)
3943 i = find_token(document.header, "\\papersize", 0)
3945 document.warning("Malformed LyX document! Missing \\papersize header.")
3947 val = get_value(document.header, "\\papersize", i)
3952 document.header[i] = "\\papersize default"
3954 i = find_token(document.header, "\\options", 0)
3956 i = find_token(document.header, "\\textclass", 0)
3958 document.warning("Malformed LyX document! Missing \\textclass header.")
3960 document.header.insert(i, "\\options " + val)
3962 document.header[i] = document.header[i] + "," + val
# Converts memoir/KOMA documents to the new native page sizes, switching
# \use_geometry back on where it had been disabled.
# NOTE(review): `document.header[1] = ...` writes a fixed header index —
# presumably it should be `document.header[i]` (the \use_geometry false line
# found just above); confirm against the full source. Extraction dropped
# interior lines and indentation; code kept verbatim.
3965 def convert_pagesizes(document):
3966 "Convert to new page sizes in memoir and KOMA to options"
3968 if document.textclass != "memoir" and document.textclass[:3] != "scr":
3971 i = find_token(document.header, "\\use_geometry true", 0)
3986 i = find_token(document.header, "\\papersize", 0)
3988 document.warning("Malformed LyX document! Missing \\papersize header.")
3990 val = get_value(document.header, "\\papersize", i)
3995 i = find_token(document.header, "\\use_geometry false", 0)
3997 # Maintain use of geometry
3998 document.header[1] = "\\use_geometry true"
# Reverts the new KOMA-script font sizes: non-default sizes are reset to
# "default" in \paperfontsize and re-expressed as a `fontsize=<val>` class
# option, appended to (or inserted as) the \options header line.
# NOTE(review): extraction dropped interior lines (guards/returns around the
# defsizes check) and indentation; code kept verbatim.
4001 def revert_komafontsizes(document):
4002 "Revert new font sizes in KOMA to options"
4004 if document.textclass[:3] != "scr":
4007 i = find_token(document.header, "\\paperfontsize", 0)
4009 document.warning("Malformed LyX document! Missing \\paperfontsize header.")
4012 defsizes = ["default", "10", "11", "12"]
4014 val = get_value(document.header, "\\paperfontsize", i)
4019 document.header[i] = "\\paperfontsize default"
4021 fsize = "fontsize=" + val
4023 i = find_token(document.header, "\\options", 0)
4025 i = find_token(document.header, "\\textclass", 0)
4027 document.warning("Malformed LyX document! Missing \\textclass header.")
4029 document.header.insert(i, "\\options " + fsize)
4031 document.header[i] = document.header[i] + "," + fsize
# Reverts qualified citation lists that reuse the same key to ERT.
# LyX 2.3 only supports unique keys in qualified lists, so insets whose
# key list contains duplicates are rewritten as raw biblatex multicite
# commands (textcites/parencites/... with [pre][post] arguments per key).
# Only applies to biblatex-based cite engines.
# NOTE(review): extraction dropped many interior lines (loop headers,
# guards, the pre/post assembly around lines 4113-4170) and indentation;
# code kept verbatim, only comments added.
4034 def revert_dupqualicites(document):
4035 "Revert qualified citation list commands with duplicate keys to ERT"
4037 # LyX 2.3 only supports qualified citation lists with unique keys. Thus,
4038 # we need to revert those with multiple uses of the same key.
4042 i = find_token(document.header, "\\cite_engine", 0)
4044 document.warning("Malformed document! Missing \\cite_engine")
4046 engine = get_value(document.header, "\\cite_engine", i)
4048 if engine not in ["biblatex", "biblatex-natbib"]:
# Map from LyX citation inset command to the biblatex multicite macro.
4051 # Citation insets that support qualified lists, with their LaTeX code
4055 "citet": "textcites",
4056 "Citet": "Textcites",
4057 "citep": "parencites",
4058 "Citep": "Parencites",
4059 "Footcite": "Smartcites",
4060 "footcite": "smartcites",
4061 "Autocite": "Autocites",
4062 "autocite": "autocites",
4067 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
4070 j = find_end_of_inset(document.body, i)
4072 document.warning("Can't find end of citation inset at line %d!!" % (i))
4076 k = find_token(document.body, "LatexCommand", i, j)
4078 document.warning("Can't find LatexCommand for citation inset at line %d!" % (i))
4082 cmd = get_value(document.body, "LatexCommand", k)
4083 if cmd not in list(ql_citations.keys()):
# Insets without pre/post text lists are plain lists; nothing to revert.
4087 pres = find_token(document.body, "pretextlist", i, j)
4088 posts = find_token(document.body, "posttextlist", i, j)
4089 if pres == -1 and posts == -1:
4094 key = get_quoted_value(document.body, "key", i, j)
4096 document.warning("Citation inset at line %d does not have a key!" % (i))
# Only lists with duplicate keys need reverting.
4100 keys = key.split(",")
4101 ukeys = list(set(keys))
4102 if len(keys) == len(ukeys):
# Pre/post text lists are tab-separated "key text" pairs; collect them
# per key (duplicate keys accumulate tab-joined values).
4107 pretexts = get_quoted_value(document.body, "pretextlist", pres)
4108 posttexts = get_quoted_value(document.body, "posttextlist", posts)
4110 pre = get_quoted_value(document.body, "before", i, j)
4111 post = get_quoted_value(document.body, "after", i, j)
4112 prelist = pretexts.split("\t")
4115 ppp = pp.split(" ", 1)
4121 if ppp[0] in premap:
4122 premap[ppp[0]] = premap[ppp[0]] + "\t" + val
4124 premap[ppp[0]] = val
4125 postlist = posttexts.split("\t")
4128 ppp = pp.split(" ", 1)
4134 if ppp[0] in postmap:
4135 postmap[ppp[0]] = postmap[ppp[0]] + "\t" + val
4137 postmap[ppp[0]] = val
4138 # Replace known new commands with ERT
# Parens inside the global pre/post note must be braced, since ( ) are
# the multicite delimiters.
4139 if "(" in pre or ")" in pre:
4140 pre = "{" + pre + "}"
4141 if "(" in post or ")" in post:
4142 post = "{" + post + "}"
4143 res = "\\" + ql_citations[cmd]
4145 res += "(" + pre + ")"
4147 res += "(" + post + ")"
# Emit one [pre][post]{key} group per key, consuming the collected
# per-key texts in order.
4151 if premap.get(kk, "") != "":
4152 akeys = premap[kk].split("\t", 1)
4155 res += "[" + akey + "]"
4157 premap[kk] = "\t".join(akeys[1:])
4160 if postmap.get(kk, "") != "":
4161 akeys = postmap[kk].split("\t", 1)
4164 res += "[" + akey + "]"
4166 postmap[kk] = "\t".join(akeys[1:])
4169 elif premap.get(kk, "") != "":
4171 res += "{" + kk + "}"
4172 document.body[i : j + 1] = put_cmd_in_ert([res])
# Strips the "paper" suffix from \papersize header values
# (e.g. "a4paper" -> "a4") for the new naming scheme.
# NOTE(review): extraction dropped interior lines (original 4181-4207,
# presumably a guard/size list) and indentation; code kept verbatim.
4175 def convert_pagesizenames(document):
4176 "Convert LyX page sizes names"
4178 i = find_token(document.header, "\\papersize", 0)
4180 document.warning("Malformed LyX document! Missing \\papersize header.")
4208 val = get_value(document.header, "\\papersize", i)
4210 newval = val.replace("paper", "")
4211 document.header[i] = "\\papersize " + newval
# Re-appends the "paper" suffix to \papersize header values
# (e.g. "a4" -> "a4paper"), undoing convert_pagesizenames.
# NOTE(review): extraction dropped interior lines (original 4220-4246)
# and indentation; code kept verbatim.
4214 def revert_pagesizenames(document):
4215 "Revert LyX page sizes names"
4217 i = find_token(document.header, "\\papersize", 0)
4219 document.warning("Malformed LyX document! Missing \\papersize header.")
4247 val = get_value(document.header, "\\papersize", i)
4249 newval = val + "paper"
4250 document.header[i] = "\\papersize " + newval
# Replaces native endnote FloatList insets with a raw \theendnotes ERT,
# for documents using the endnotes/foottoend modules.
# NOTE(review): extraction dropped the loop header and guards and the
# indentation; code kept verbatim.
4253 def revert_theendnotes(document):
4254 "Reverts native support of \\theendnotes to TeX-code"
4257 "endnotes" not in document.get_module_list()
4258 and "foottoend" not in document.get_module_list()
4264 i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
4267 j = find_end_of_inset(document.body, i)
4269 document.warning("Malformed LyX document: Can't find end of FloatList inset")
4272 document.body[i : j + 1] = put_cmd_in_ert("\\theendnotes")
# Reverts native enotez-package support: Endnote flex insets become
# \endnote ERT, endnote FloatLists become \printendnotes ERT, the enotez
# package is loaded in the preamble, and the modules are removed.
# NOTE(review): extraction dropped loop headers/guards and indentation;
# code kept verbatim.
4275 def revert_enotez(document):
4276 "Reverts native support of enotez package to TeX-code"
4279 "enotez" not in document.get_module_list()
4280 and "foottoenotez" not in document.get_module_list()
4285 if find_token(document.body, "\\begin_inset Flex Endnote", 0) != -1:
4288 revert_flex_inset(document, "Endnote", "\\endnote")
4292 i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
4295 j = find_end_of_inset(document.body, i)
4297 document.warning("Malformed LyX document: Can't find end of FloatList inset")
4301 document.body[i : j + 1] = put_cmd_in_ert("\\printendnotes")
4304 add_to_preamble(document, ["\\usepackage{enotez}"])
4305 document.del_module("enotez")
4306 document.del_module("foottoenotez")
# Reverts memoir-class native endnotes: Endnote flex insets become
# \pagenote ERT (or \endnote when an endnotes module is also active),
# pagenote FloatLists become \printpagenotes[*] ERT, and \makepagenote is
# added to the preamble.
# NOTE(review): extraction dropped loop headers/guards and indentation;
# code kept verbatim.
4309 def revert_memoir_endnotes(document):
4310 "Reverts native support of memoir endnotes to TeX-code"
4312 if document.textclass != "memoir":
4315 encommand = "\\pagenote"
4316 modules = document.get_module_list()
4319 or "foottoenotez" in modules
4320 or "endnotes" in modules
4321 or "foottoend" in modules
4323 encommand = "\\endnote"
4325 revert_flex_inset(document, "Endnote", encommand)
4329 i = find_token(document.body, "\\begin_inset FloatList pagenote", i + 1)
4332 j = find_end_of_inset(document.body, i)
4334 document.warning("Malformed LyX document: Can't find end of FloatList inset")
4337 if document.body[i] == "\\begin_inset FloatList pagenote*":
4338 document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes*")
4340 document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes")
4341 add_to_preamble(document, ["\\makepagenote"])
# Reverts the graphics "totalheight" parameter introduced in 2.4 back to
# the old "height" parameter: a totalheight found in the special string is
# renamed, and any old height= entry in special is converted back into a
# plain height line (relative units mapped via relative_heights).
# NOTE(review): extraction dropped many interior lines (loop header,
# guards, branches between 4388-4416) and indentation; code kept verbatim.
4344 def revert_totalheight(document):
4345 "Reverts graphics height parameter from totalheight to height"
# Map LaTeX length macros back to LyX percent units for the height value.
4347 relative_heights = {
4348 "\\textwidth": "text%",
4349 "\\columnwidth": "col%",
4350 "\\paperwidth": "page%",
4351 "\\linewidth": "line%",
4352 "\\textheight": "theight%",
4353 "\\paperheight": "pheight%",
4354 "\\baselineskip ": "baselineskip%",
4358 i = find_token(document.body, "\\begin_inset Graphics", i)
4361 j = find_end_of_inset(document.body, i)
4363 document.warning("Can't find end of graphics inset at line %d!!" % (i))
# Look for a `special height=<num><unit>,...` entry inside the inset.
4367 rx = re.compile(r"\s*special\s*(\S+)$")
4368 rxx = re.compile(r"(\d*\.*\d+)(\S+)$")
4369 k = find_re(document.body, rx, i, j)
4373 m = rx.match(document.body[k])
4375 special = m.group(1)
4376 mspecial = special.split(",")
4377 for spc in mspecial:
4378 if spc.startswith("height="):
4379 oldheight = spc.split("=")[1]
4380 ms = rxx.search(oldheight)
4382 oldunit = ms.group(2)
4383 if oldunit in list(relative_heights.keys()):
# LaTeX fraction -> LyX percentage (e.g. 0.5\textheight -> 50theight%).
4384 oldval = str(float(ms.group(1)) * 100)
4385 oldunit = relative_heights[oldunit]
4386 oldheight = oldval + oldunit
4387 mspecial.remove(spc)
4389 if len(mspecial) > 0:
4390 special = ",".join(mspecial)
# Rename the native height line to a totalheight= special entry.
4394 rx = re.compile(r"(\s*height\s*)(\S+)$")
4395 kk = find_re(document.body, rx, i, j)
4397 m = rx.match(document.body[kk])
4403 val = val + "," + special
4404 document.body[k] = "\tspecial " + "totalheight=" + val
4406 document.body.insert(kk, "\tspecial totalheight=" + val)
4408 document.body[kk] = m.group(1) + oldheight
4410 del document.body[kk]
4411 elif oldheight != "":
4413 document.body[k] = "\tspecial " + special
4414 document.body.insert(k, "\theight " + oldheight)
4416 document.body[k] = "\theight " + oldheight
# Inverse of revert_totalheight: a `totalheight=` entry in a Graphics
# inset's special string becomes the native height parameter, while any
# existing native height line is moved into special as `height=` (LyX
# percent units mapped back to LaTeX length macros).
# NOTE(review): extraction dropped many interior lines (loop header,
# guards, branches between 4456-4484) and indentation; code kept verbatim.
4420 def convert_totalheight(document):
4421 "Converts graphics height parameter from totalheight to height"
# Map LyX percent units to LaTeX length macros for the special string.
4423 relative_heights = {
4424 "text%": "\\textwidth",
4425 "col%": "\\columnwidth",
4426 "page%": "\\paperwidth",
4427 "line%": "\\linewidth",
4428 "theight%": "\\textheight",
4429 "pheight%": "\\paperheight",
4430 "baselineskip%": "\\baselineskip",
4434 i = find_token(document.body, "\\begin_inset Graphics", i)
4437 j = find_end_of_inset(document.body, i)
4439 document.warning("Can't find end of graphics inset at line %d!!" % (i))
4443 rx = re.compile(r"\s*special\s*(\S+)$")
4444 k = find_re(document.body, rx, i, j)
4448 m = rx.match(document.body[k])
4450 special = m.group(1)
4451 mspecial = special.split(",")
4452 for spc in mspecial:
4453 if spc[:12] == "totalheight=":
4454 newheight = spc.split("=")[1]
4455 mspecial.remove(spc)
4457 if len(mspecial) > 0:
4458 special = ",".join(mspecial)
4462 rx = re.compile(r"(\s*height\s*)(\d+\.?\d*)(\S+)$")
4463 kk = find_re(document.body, rx, i, j)
4465 m = rx.match(document.body[kk])
4470 if unit in list(relative_heights.keys()):
# LyX percentage -> LaTeX fraction (e.g. 50theight% -> 0.5\textheight).
4471 val = str(float(val) / 100)
4472 unit = relative_heights[unit]
4475 val = val + unit + "," + special
4476 document.body[k] = "\tspecial " + "height=" + val
4478 document.body.insert(kk + 1, "\tspecial height=" + val + unit)
4480 document.body[kk] = m.group(1) + newheight
4482 del document.body[kk]
4483 elif newheight != "":
4484 document.body.insert(k, "\theight " + newheight)
# Converts the changebars module to the native \change_bars header
# parameter, inserting it before \output_changes and removing the module.
# NOTE(review): extraction dropped the guard/return lines and indentation;
# code kept verbatim.
4488 def convert_changebars(document):
4489 "Converts the changebars module to native solution"
4491 if "changebars" not in document.get_module_list():
4494 i = find_token(document.header, "\\output_changes", 0)
4496 document.warning("Malformed LyX document! Missing \\output_changes header.")
# On the malformed path the module is still removed (best-effort cleanup).
4497 document.del_module("changebars")
4500 document.header.insert(i, "\\change_bars true")
4501 document.del_module("changebars")
# Reverts the native \change_bars header param back to the changebars
# module: re-adds the module (when the value warrants it) and deletes the
# header line.
# NOTE(review): extraction dropped the guards and the condition around the
# add_module call, plus indentation; code kept verbatim.
4504 def revert_changebars(document):
4505 "Converts native changebar param to module"
4507 i = find_token(document.header, "\\change_bars", 0)
4509 document.warning("Malformed LyX document! Missing \\change_bars header.")
4512 val = get_value(document.header, "\\change_bars", i)
4515 document.add_module("changebars")
4517 del document.header[i]
# Adds the \postpone_fragile_content header parameter (inserted before
# \output_changes), defaulting to false for pre-2.4 documents (see #2154).
# NOTE(review): extraction dropped guard lines and indentation; code kept
# verbatim.
4520 def convert_postpone_fragile(document):
4521 "Adds false \\postpone_fragile_content buffer param"
4523 i = find_token(document.header, "\\output_changes", 0)
4525 document.warning("Malformed LyX document! Missing \\output_changes header.")
4527 # Set this to false for old documents (see #2154)
4528 document.header.insert(i, "\\postpone_fragile_content false")
# Removes the \postpone_fragile_content header parameter again.
# NOTE(review): extraction dropped guard lines and indentation; code kept
# verbatim.
4531 def revert_postpone_fragile(document):
4532 "Remove \\postpone_fragile_content buffer param"
4534 i = find_token(document.header, "\\postpone_fragile_content", 0)
4536 document.warning("Malformed LyX document! Missing \\postpone_fragile_content.")
4539 del document.header[i]
# Strips change="..." tracking attributes from tabular <column> and <row>
# tags, which older formats do not understand.
# NOTE(review): extraction dropped the outer loop header/guards and
# indentation; code kept verbatim.
4542 def revert_colrow_tracking(document):
4543 "Remove change tag from tabular columns/rows"
4546 i = find_token(document.body, "\\begin_inset Tabular", i + 1)
4549 j = find_end_of_inset(document.body, i + 1)
4551 document.warning("Malformed LyX document: Could not find end of tabular.")
4553 for k in range(i, j):
4554 m = re.search('^<column.*change="([^"]+)".*>$', document.body[k])
4556 document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', "")
4557 m = re.search('^<row.*change="([^"]+)".*>$', document.body[k])
4559 document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', "")
# Converts \maintain_unincluded_children from the old boolean value to the
# new tristate form (true -> "strict", false -> "no").
# NOTE(review): extraction dropped the guard/branch lines and indentation;
# code kept verbatim.
4562 def convert_counter_maintenance(document):
4563 "Convert \\maintain_unincluded_children buffer param from boolean value to tristate"
4565 i = find_token(document.header, "\\maintain_unincluded_children", 0)
4567 document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
4570 val = get_value(document.header, "\\maintain_unincluded_children", i)
4573 document.header[i] = "\\maintain_unincluded_children strict"
4575 document.header[i] = "\\maintain_unincluded_children no"
# Reverts \maintain_unincluded_children from tristate back to the old
# boolean form ("no" -> false, otherwise true).
# NOTE(review): extraction dropped the guard/branch lines and indentation;
# code kept verbatim.
4578 def revert_counter_maintenance(document):
4579 "Revert \\maintain_unincluded_children buffer param to previous boolean value"
4581 i = find_token(document.header, "\\maintain_unincluded_children", 0)
4583 document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
4586 val = get_value(document.header, "\\maintain_unincluded_children", i)
4589 document.header[i] = "\\maintain_unincluded_children false"
4591 document.header[i] = "\\maintain_unincluded_children true"
# Reverts counter CommandInsets to the equivalent LaTeX counter commands in
# ERT ("set" -> \setcounter, "addto" -> \addtocounter, "reset" ->
# \setcounter{..}{0}, save/restore via auxiliary LyXSave<cnt> counters that
# are \newcounter'ed in the preamble). LyX-only insets are simply removed.
# NOTE(review): extraction dropped the loop header, guards and branch
# keywords (e.g. the `if cmd == "set":` line) and indentation; code kept
# verbatim. Also: the "Unknown counter command" warning interpolates `cnt`
# where `cmd` looks intended — confirm against the full source.
4594 def revert_counter_inset(document):
4595 "Revert counter inset to ERT, where possible"
4597 needed_counters = {}
4599 i = find_token(document.body, "\\begin_inset CommandInset counter", i)
4602 j = find_end_of_inset(document.body, i)
4604 document.warning("Can't find end of counter inset at line %d!" % i)
4607 lyx = get_quoted_value(document.body, "lyxonly", i, j)
4609 # there is nothing we can do to affect the LyX counters
4610 document.body[i : j + 1] = []
4613 cnt = get_quoted_value(document.body, "counter", i, j)
4615 document.warning("No counter given for inset at line %d!" % i)
4619 cmd = get_quoted_value(document.body, "LatexCommand", i, j)
# NOTE(review): unconditional debug warning of the command name — possibly
# leftover debugging output; confirm.
4620 document.warning(cmd)
4623 val = get_quoted_value(document.body, "value", i, j)
4625 document.warning("Can't convert counter inset at line %d!" % i)
4627 ert = put_cmd_in_ert(f"\\setcounter{{{cnt}}}{{{val}}}")
4628 elif cmd == "addto":
4629 val = get_quoted_value(document.body, "value", i, j)
4631 document.warning("Can't convert counter inset at line %d!" % i)
4633 ert = put_cmd_in_ert(f"\\addtocounter{{{cnt}}}{{{val}}}")
4634 elif cmd == "reset":
4635 ert = put_cmd_in_ert("\\setcounter{%s}{0}" % (cnt))
4637 needed_counters[cnt] = 1
4638 savecnt = "LyXSave" + cnt
4639 ert = put_cmd_in_ert(f"\\setcounter{{{savecnt}}}{{\\value{{{cnt}}}}}")
4640 elif cmd == "restore":
4641 needed_counters[cnt] = 1
4642 savecnt = "LyXSave" + cnt
4643 ert = put_cmd_in_ert(f"\\setcounter{{{cnt}}}{{\\value{{{savecnt}}}}}")
4645 document.warning("Unknown counter command `%s' in inset at line %d!" % (cnt, i))
4648 document.body[i : j + 1] = ert
# Declare the auxiliary save counters used by save/restore in the preamble.
4653 for cnt in needed_counters:
4654 pretext.append("\\newcounter{LyXSave%s}" % (cnt))
4656 add_to_preamble(document, pretext)
# Reverts medspace/thickspace InsetSpace insets to their TeX commands in
# ERT, and ensures amsmath is available (it defines \thickspace) when the
# document does not already force-load it.
# NOTE(review): extraction dropped guard lines and indentation; code kept
# verbatim.
4659 def revert_ams_spaces(document):
4660 "Revert InsetSpace medspace and thickspace into their TeX-code counterparts"
4662 insets = ["\\medspace{}", "\\thickspace{}"]
4663 for inset in insets:
4665 i = find_token(document.body, "\\begin_inset space " + inset, i)
4668 end = find_end_of_inset(document.body, i)
4669 subst = put_cmd_in_ert(inset)
4670 document.body[i : end + 1] = subst
4674 # load amsmath in the preamble if not already loaded
4675 i = find_token(document.header, "\\use_package amsmath 2", 0)
4677 add_to_preamble(document, ["\\@ifundefined{thickspace}{\\usepackage{amsmath}}{}"])
# Moves the old "skip" paragraph-separation setting into explicit preamble
# code (\setlength{\parskip}/\parindent) and switches the document header
# to indent-based separation with default indentation.
# NOTE(review): extraction dropped guard/return lines and the
# add_to_preamble call head (around 4699-4702), plus indentation; code
# kept verbatim.
4681 def convert_parskip(document):
4682 "Move old parskip settings to preamble"
4684 i = find_token(document.header, "\\paragraph_separation skip", 0)
4688 j = find_token(document.header, "\\defskip", 0)
4690 document.warning("Malformed LyX document! Missing \\defskip.")
4693 val = get_value(document.header, "\\defskip", j)
4695 skipval = "\\medskipamount"
4696 if val == "smallskip" or val == "medskip" or val == "bigskip":
4697 skipval = "\\" + val + "amount"
4703 ["\\setlength{\\parskip}{" + skipval + "}", "\\setlength{\\parindent}{0pt}"],
4706 document.header[i] = "\\paragraph_separation indent"
4707 document.header[j] = "\\paragraph_indentation default"
# Reverts the new parskip settings by loading the parskip package with a
# skip= option matching the old \defskip value, then switching the header
# back to indent-based separation.
# NOTE(review): extraction dropped guard/return lines and indentation;
# code kept verbatim.
4710 def revert_parskip(document):
4711 "Revert new parskip settings to preamble"
4713 i = find_token(document.header, "\\paragraph_separation skip", 0)
4717 j = find_token(document.header, "\\defskip", 0)
4719 document.warning("Malformed LyX document! Missing \\defskip.")
4722 val = get_value(document.header, "\\defskip", j)
# halfline is parskip's default, so it needs no option at all.
4725 if val == "smallskip" or val == "medskip" or val == "bigskip":
4726 skipval = "[skip=\\" + val + "amount]"
4727 elif val == "fullline":
4728 skipval = "[skip=\\baselineskip]"
4729 elif val != "halfline":
4730 skipval = "[skip={" + val + "}]"
4732 add_to_preamble(document, ["\\usepackage" + skipval + "{parskip}"])
4734 document.header[i] = "\\paragraph_separation indent"
4735 document.header[j] = "\\paragraph_indentation default"
# Reverts the new fullline/halfline VSpace insets (and their * variants)
# to explicit \vspace ERT.
# NOTE(review): extraction dropped the dict head (`insets = {`) and guard
# lines, plus indentation; code kept verbatim.
4738 def revert_line_vspaces(document):
4739 "Revert fulline and halfline vspaces to TeX"
4741 "fullline*": "\\vspace*{\\baselineskip}",
4742 "fullline": "\\vspace{\\baselineskip}",
4743 "halfline*": "\\vspace*{0.5\\baselineskip}",
4744 "halfline": "\\vspace{0.5\\baselineskip}",
4746 for inset in insets.keys():
4748 i = find_token(document.body, "\\begin_inset VSpace " + inset, i)
4751 end = find_end_of_inset(document.body, i)
4752 subst = put_cmd_in_ert(insets[inset])
4753 document.body[i : end + 1] = subst
# Converts a TeX-fonts Libertinus serif setup to the native font header
# entries via the shared font-mapping machinery.
# NOTE(review): extraction dropped blank lines and indentation; code kept
# verbatim.
4756 def convert_libertinus_rm_fonts(document):
4757 """Handle Libertinus serif fonts definition to LaTeX"""
4759 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
4760 fm = createFontMapping(["Libertinus"])
4761 convert_fonts(document, fm)
# Reverts native Libertinus serif header entries back to LaTeX preamble
# code, collecting the needed packages into `fontmap`.
# NOTE(review): createFontMapping is called with lower-case "libertinus"
# while the sibling convert uses "Libertinus" — verify the mapping key is
# case-correct. The `fontmap` init line was also dropped by the extraction.
# Code kept verbatim.
4764 def revert_libertinus_rm_fonts(document):
4765 """Revert Libertinus serif font definition to LaTeX"""
4767 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
4769 fm = createFontMapping(["libertinus"])
4770 if revert_fonts(document, fm, fontmap):
4771 add_preamble_fonts(document, fontmap)
# Reverts native LibertinusSans-LF / LibertinusMono-TLF sans/typewriter
# definitions to \renewcommand preamble code, including scale factors
# (\LibertinusSans@scale / \LibertinusMono@scale) when the document uses a
# non-default sf/tt scale.
# NOTE(review): `sf_scale != "100.0"` and `tt_scale != "100.0"` compare a
# float against a string, so they are always true — presumably the literal
# should be the float 100.0; confirm against the full source. Extraction
# dropped many interior lines (guards, try/except around float(), the
# closing pieces of the add_to_preamble calls) and indentation; code kept
# verbatim.
4774 def revert_libertinus_sftt_fonts(document):
4775 "Revert Libertinus sans and tt font definitions to LaTeX"
4777 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
4779 i = find_token(document.header, '\\font_sans "LibertinusSans-LF"', 0)
4781 j = find_token(document.header, "\\font_sans_osf true", 0)
4783 add_to_preamble(document, ["\\renewcommand{\\sfdefault}{LibertinusSans-OsF}"])
4784 document.header[j] = "\\font_sans_osf false"
4786 add_to_preamble(document, ["\\renewcommand{\\sfdefault}{LibertinusSans-LF}"])
4787 document.header[i] = document.header[i].replace("LibertinusSans-LF", "default")
4789 sfval = find_token(document.header, "\\font_sf_scale", 0)
4791 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
4793 sfscale = document.header[sfval].split()
4796 document.header[sfval] = " ".join(sfscale)
4799 sf_scale = float(val)
4801 document.warning("Invalid font_sf_scale value: " + val)
4802 if sf_scale != "100.0":
4806 "\\renewcommand*{\\LibertinusSans@scale}{"
4807 + str(sf_scale / 100.0)
4812 i = find_token(document.header, '\\font_typewriter "LibertinusMono-TLF"', 0)
4814 add_to_preamble(document, ["\\renewcommand{\\ttdefault}{LibertinusMono-TLF}"])
4815 document.header[i] = document.header[i].replace("LibertinusMono-TLF", "default")
4817 ttval = find_token(document.header, "\\font_tt_scale", 0)
4819 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
4821 ttscale = document.header[ttval].split()
4824 document.header[ttval] = " ".join(ttscale)
4827 tt_scale = float(val)
4829 document.warning("Invalid font_tt_scale value: " + val)
4830 if tt_scale != "100.0":
4834 "\\renewcommand*{\\LibertinusMono@scale}{"
4835 + str(tt_scale / 100.0)
# Removes the \docbook_table_output header parameter, which older formats
# do not know.
# NOTE(review): extraction dropped the guard line and indentation; code
# kept verbatim.
4841 def revert_docbook_table_output(document):
4842 i = find_token(document.header, "\\docbook_table_output")
4844 del document.header[i]
# Reverts "Newpage nopagebreak" insets to a raw \nopagebreak{} ERT.
# NOTE(review): extraction dropped the loop header/guards and indentation;
# code kept verbatim.
4847 def revert_nopagebreak(document):
4849 i = find_token(document.body, "\\begin_inset Newpage nopagebreak")
4852 end = find_end_of_inset(document.body, i)
4854 document.warning("Malformed LyX document: Could not find end of Newpage inset.")
4856 subst = put_cmd_in_ert("\\nopagebreak{}")
4857 document.body[i : end + 1] = subst
# Reverts the Hungarian quotation style: the header falls back to the
# polish style, and each h* Quotes inset is mapped to the closest
# pre-existing style code.
# NOTE(review): hld/hrd map to the p* (polish) codes but hls/hrs map to
# ald/ard — asymmetric; confirm the intended fallback codes against the
# full source. Extraction dropped the loop header/guards and indentation;
# code kept verbatim.
4860 def revert_hrquotes(document):
4861 "Revert Hungarian Quotation marks"
4863 i = find_token(document.header, "\\quotes_style hungarian", 0)
4865 document.header[i] = "\\quotes_style polish"
4869 i = find_token(document.body, "\\begin_inset Quotes h")
4872 if document.body[i] == "\\begin_inset Quotes hld":
4873 document.body[i] = "\\begin_inset Quotes pld"
4874 elif document.body[i] == "\\begin_inset Quotes hrd":
4875 document.body[i] = "\\begin_inset Quotes prd"
4876 elif document.body[i] == "\\begin_inset Quotes hls":
4877 document.body[i] = "\\begin_inset Quotes ald"
4878 elif document.body[i] == "\\begin_inset Quotes hrs":
4879 document.body[i] = "\\begin_inset Quotes ard"
# Renames \prettyref to \formatted inside Formula insets (new internal
# name for formatted references).
# NOTE(review): extraction dropped the loop header/guards and indentation;
# code kept verbatim.
4882 def convert_math_refs(document):
4885 i = find_token(document.body, "\\begin_inset Formula", i)
4888 j = find_end_of_inset(document.body, i)
4890 document.warning("Can't find end of inset at line %d of body!" % i)
4894 document.body[i] = document.body[i].replace("\\prettyref", "\\formatted")
# Reverts \formatted back to \prettyref inside Formula insets and unwraps
# \labelonly{...} references to their bare label text.
# NOTE(review): extraction dropped the loop header/guards and indentation;
# code kept verbatim.
4898 def revert_math_refs(document):
4901 i = find_token(document.body, "\\begin_inset Formula", i)
4904 j = find_end_of_inset(document.body, i)
4906 document.warning("Can't find end of inset at line %d of body!" % i)
4910 document.body[i] = document.body[i].replace("\\formatted", "\\prettyref")
4911 if "\\labelonly" in document.body[i]:
4912 document.body[i] = re.sub("\\\\labelonly{([^}]+?)}", "\\1", document.body[i])
# Converts literal branch background colors to the semantic "background"
# value, but only for the standard LyX background #faf0e6.
# NOTE(review): extraction dropped the loop header/guards and indentation;
# code kept verbatim.
4916 def convert_branch_colors(document):
4917 "Convert branch colors to semantic values"
4921 i = find_token(document.header, "\\branch", i)
4924 j = find_token(document.header, "\\end_branch", i)
4926 document.warning("Malformed LyX document. Can't find end of branch definition!")
4928 # We only support the standard LyX background for now
4929 k = find_token(document.header, "\\color #faf0e6", i, j)
4931 document.header[k] = "\\color background"
# Reverts semantic branch colors: non-hex values are replaced with
# "\color none", which LyX 2.3 reads as the default background.
# NOTE(review): `bcolor[1] != "#"` tests the SECOND character of the value;
# for a hex color like "#faf0e6" the "#" is at index 0 — looks like it
# should be `bcolor[0]`; confirm against the full source. Extraction
# dropped the loop header/guards and indentation; code kept verbatim.
4935 def revert_branch_colors(document):
4936 "Revert semantic branch colors"
4940 i = find_token(document.header, "\\branch", i)
4943 j = find_token(document.header, "\\end_branch", i)
4945 document.warning("Malformed LyX document. Can't find end of branch definition!")
4947 k = find_token(document.header, "\\color", i, j)
4949 bcolor = get_value(document.header, "\\color", k)
4950 if bcolor[1] != "#":
4951 # this will be read as background by LyX 2.3
4952 document.header[k] = "\\color none"
# Removes the darkModeSensitive parameter line from Graphics insets, which
# older formats do not know.
# NOTE(review): extraction dropped the loop header/guards and indentation;
# code kept verbatim.
4956 def revert_darkmode_graphics(document):
4957 "Revert darkModeSensitive InsetGraphics param"
4961 i = find_token(document.body, "\\begin_inset Graphics", i)
4964 j = find_end_of_inset(document.body, i)
4966 document.warning("Can't find end of graphics inset at line %d!!" % (i))
4969 k = find_token(document.body, "\tdarkModeSensitive", i, j)
4971 del document.body[k]
# Drops the second (dark-mode) color from branch \color lines, keeping
# only the first (light-mode) color.
# NOTE(review): extraction dropped the loop header/guards and indentation;
# code kept verbatim.
4975 def revert_branch_darkcols(document):
4976 "Revert dark branch colors"
4980 i = find_token(document.header, "\\branch", i)
4983 j = find_token(document.header, "\\end_branch", i)
4985 document.warning("Malformed LyX document. Can't find end of branch definition!")
4987 k = find_token(document.header, "\\color", i, j)
4989 m = re.search("\\\\color (\\S+) (\\S+)", document.header[k])
4991 document.header[k] = "\\color " + m.group(1)
# Reverts 2.4 varwidth table columns with line breaks, multiple paragraphs
# or non-standard layouts: qualifying cells are wrapped in a custom
# `cellvarwidth` LaTeX environment (ERT), columns gain a V{\linewidth}
# special, ERT is used for newlines/linebreaks inside such cells, and
# parbreaks inside multirows become \endgraf. Emits the needed preamble
# code (array/varwidth packages, cellvarwidth environment definition).
# NOTE(review): this function was extracted with a large number of
# interior lines dropped (loop headers, guards, several condition heads —
# see the gaps in the embedded numbering) and flattened indentation; the
# logic is order-sensitive, so the code is kept strictly verbatim and only
# comments were added.
4995 def revert_vcolumns2(document):
4996 """Revert varwidth columns with line breaks etc."""
4998 needvarwidth = False
5000 needcellvarwidth = False
5003 i = find_token(document.body, "\\begin_inset Tabular", i + 1)
5006 j = find_end_of_inset(document.body, i)
5008 document.warning("Malformed LyX document: Could not find end of tabular.")
5011 # Collect necessary column information
# nrows/ncols are read from the quoted attributes of the <lyxtabular> tag.
5013 nrows = int(document.body[i + 1].split('"')[3])
5014 ncols = int(document.body[i + 1].split('"')[5])
5016 for k in range(ncols):
5017 m = find_token(document.body, "<column", m)
5018 width = get_option_value(document.body[m], "width")
5019 varwidth = get_option_value(document.body[m], "varwidth")
5020 alignment = get_option_value(document.body[m], "alignment")
5021 valignment = get_option_value(document.body[m], "valignment")
5022 special = get_option_value(document.body[m], "special")
# col_info entry layout: [width, varwidth, alignment, valignment,
# special, line-index-of-<column>-tag].
5023 col_info.append([width, varwidth, alignment, valignment, special, m])
# Walk every cell; a cell is a varwidth candidate (vcand) when it is not
# fixed-width and contains a newline, multiple paragraphs, or a
# non-Plain layout.
5028 for row in range(nrows):
5029 for col in range(ncols):
5030 m = find_token(document.body, "<cell", m)
5031 multicolumn = get_option_value(document.body[m], "multicolumn") != ""
5032 multirow = get_option_value(document.body[m], "multirow") != ""
5033 fixedwidth = get_option_value(document.body[m], "width") != ""
5034 rotate = get_option_value(document.body[m], "rotate")
5035 cellalign = get_option_value(document.body[m], "alignment")
5036 cellvalign = get_option_value(document.body[m], "valignment")
5037 # Check for: linebreaks, multipars, non-standard environments
5039 endcell = find_token(document.body, "</cell>", begcell)
5042 find_token(document.body, "\\begin_inset Newline", begcell, endcell)
5045 vcand = not fixedwidth
5046 elif count_pars_in_inset(document.body, begcell + 2) > 1:
5047 vcand = not fixedwidth
5048 elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
5049 vcand = not fixedwidth
5050 colalignment = col_info[col][2]
5051 colvalignment = col_info[col][3]
# Only unrotated, effectively top-left cells qualify.
5053 if rotate == "" and (
5054 (colalignment == "left" and colvalignment == "top")
5057 and cellalign == "left"
5058 and cellvalign == "top"
5062 col_info[col][0] == ""
5063 and col_info[col][1] == ""
5064 and col_info[col][4] == ""
5067 col_line = col_info[col][5]
5069 vval = "V{\\linewidth}"
5071 document.body[m] = (
5072 document.body[m][:-1] + ' special="' + vval + '">'
5075 document.body[col_line] = (
5076 document.body[col_line][:-1]
# The cellvarwidth environment takes an optional vertical-alignment
# argument ([m]/[b]) derived from cell or column valignment.
5083 if multicolumn or multirow:
5084 if cellvalign == "middle":
5086 elif cellvalign == "bottom":
5089 if colvalignment == "middle":
5091 elif colvalignment == "bottom":
5093 flt = find_token(document.body, "\\begin_layout", begcell, endcell)
5094 elt = find_token_backwards(document.body, "\\end_layout", endcell)
5095 if flt != -1 and elt != -1:
5097 # we need to reset character layouts if necessary
5098 el = find_token(document.body, "\\emph on", flt, elt)
5100 extralines.append("\\emph default")
5101 el = find_token(document.body, "\\noun on", flt, elt)
5103 extralines.append("\\noun default")
5104 el = find_token(document.body, "\\series", flt, elt)
5106 extralines.append("\\series default")
5107 el = find_token(document.body, "\\family", flt, elt)
5109 extralines.append("\\family default")
5110 el = find_token(document.body, "\\shape", flt, elt)
5112 extralines.append("\\shape default")
5113 el = find_token(document.body, "\\color", flt, elt)
5115 extralines.append("\\color inherit")
5116 el = find_token(document.body, "\\size", flt, elt)
5118 extralines.append("\\size default")
5119 el = find_token(document.body, "\\bar under", flt, elt)
5121 extralines.append("\\bar default")
5122 el = find_token(document.body, "\\uuline on", flt, elt)
5124 extralines.append("\\uuline default")
5125 el = find_token(document.body, "\\uwave on", flt, elt)
5127 extralines.append("\\uwave default")
5128 el = find_token(document.body, "\\strikeout on", flt, elt)
5130 extralines.append("\\strikeout default")
5131 document.body[elt : elt + 1] = (
5133 + put_cmd_in_ert("\\end{cellvarwidth}")
# Insert \begin{cellvarwidth} after any \lang line so language
# switching stays outside the environment.
5137 for q in range(flt, elt):
5138 if document.body[q] != "" and document.body[q][0] != "\\":
5140 if document.body[q][:5] == "\\lang":
5144 document.body[parlang + 1 : parlang + 1] = put_cmd_in_ert(
5145 "\\begin{cellvarwidth}" + alarg
5148 document.body[flt + 1 : flt + 1] = put_cmd_in_ert(
5149 "\\begin{cellvarwidth}" + alarg
5151 needcellvarwidth = True
5153 # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
5154 # with newlines, and we do not want that)
5156 endcell = find_token(document.body, "</cell>", begcell)
5160 "\\begin_inset Newline newline",
5167 "\\begin_inset Newline linebreak",
5174 nle = find_end_of_inset(document.body, nl)
5175 del document.body[nle : nle + 1]
5177 document.body[nl : nl + 1] = put_cmd_in_ert("\\linebreak{}")
5179 document.body[nl : nl + 1] = put_cmd_in_ert("\\\\")
5180 # Replace parbreaks in multirow with \\endgraf
5181 if multirow == True:
5182 flt = find_token(document.body, "\\begin_layout", begcell, endcell)
5185 elt = find_end_of_layout(document.body, flt)
5188 "Malformed LyX document! Missing layout end."
5191 endcell = find_token(document.body, "</cell>", begcell)
5193 document.body, "\\begin_layout", elt, endcell
5197 document.body[elt : flt + 1] = put_cmd_in_ert("\\endgraf{}")
# Emit the preamble support code once per document, as needed.
5203 if needarray == True:
5204 add_to_preamble(document, ["\\usepackage{array}"])
5205 if needcellvarwidth == True:
5209 "%% Variable width box for table cells",
5210 "\\newenvironment{cellvarwidth}[1][t]",
5211 " {\\begin{varwidth}[#1]{\\linewidth}}",
5212 " {\\@finalstrut\\@arstrutbox\\end{varwidth}}",
5215 if needvarwidth == True:
5216 add_to_preamble(document, ["\\usepackage{varwidth}"])
# NOTE(review): the numeric prefixes are the original file's line numbers; the
# jumps in them (5220 -> 5224, etc.) show this listing is elided — loop headers,
# `continue`/`return` lines and warning wrappers are missing from this view.
# Code lines are therefore left byte-identical; comments only.
#
# Purpose (from visible code): walk every Tabular inset, and for each cell
# replace ERT-encoded `\begin{cellvarwidth}...\end{cellvarwidth}` environments
# and ERT `\\` / `\linebreak{}` / `\endgraf{}` sequences with native LyX
# Newline/linebreak insets; finally swap the hand-made cellvarwidth preamble
# for the real one and drop the old `\usepackage{varwidth}` line.
5219 def convert_vcolumns2(document):
5220 """Convert varwidth ERT to native"""
5224 i = find_token(document.body, "\\begin_inset Tabular", i + 1)
5227 j = find_end_of_inset(document.body, i)
5229 document.warning("Malformed LyX document: Could not find end of tabular.")
# Row/column counts are read from the quoted attributes of the <features> line
# directly after the Tabular inset (split('"') fields 3 and 5).
5233 nrows = int(document.body[i + 1].split('"')[3])
5234 ncols = int(document.body[i + 1].split('"')[5])
5237 for row in range(nrows):
5238 for col in range(ncols):
5239 m = find_token(document.body, "<cell", m)
5240 multirow = get_option_value(document.body[m], "multirow") != ""
5242 endcell = find_token(document.body, "</cell>", begcell)
# A cellvarwidth env written as ERT shows up as `\backslash` followed by
# "begin{cellvarwidth}" inside an ERT inset.
5244 cvw = find_token(document.body, "begin{cellvarwidth}", begcell, endcell)
5247 document.body[cvw - 1] == "\\backslash"
5248 and get_containing_inset(document.body, cvw)[0] == "ERT"
5251 # Remove ERTs with cellvarwidth env
5252 ecvw = find_token(document.body, "end{cellvarwidth}", begcell, endcell)
5254 if document.body[ecvw - 1] == "\\backslash":
5255 eertins = get_containing_inset(document.body, ecvw)
5256 if eertins and eertins[0] == "ERT":
5257 del document.body[eertins[1] : eertins[2] + 1]
# Re-find the opening ERT: indices shifted after the deletion above.
5259 cvw = find_token(document.body, "begin{cellvarwidth}", begcell, endcell)
5260 ertins = get_containing_inset(document.body, cvw)
5261 if ertins and ertins[0] == "ERT":
5262 del document.body[ertins[1] : ertins[2] + 1]
5264 # Convert ERT newlines (as cellvarwidth detection relies on that)
5266 endcell = find_token(document.body, "</cell>", begcell)
5267 nl = find_token(document.body, "\\backslash", begcell, endcell)
# An ERT newline is two consecutive `\backslash` tokens (i.e. LaTeX `\\`).
5268 if nl == -1 or document.body[nl + 2] != "\\backslash":
5270 ertins = get_containing_inset(document.body, nl)
5271 if ertins and ertins[0] == "ERT":
5272 document.body[ertins[1] : ertins[2] + 1] = [
5273 "\\begin_inset Newline newline",
5278 # Same for linebreaks
5280 endcell = find_token(document.body, "</cell>", begcell)
5281 nl = find_token(document.body, "linebreak", begcell, endcell)
5282 if nl == -1 or document.body[nl - 1] != "\\backslash":
5284 ertins = get_containing_inset(document.body, nl)
5285 if ertins and ertins[0] == "ERT":
5286 document.body[ertins[1] : ertins[2] + 1] = [
5287 "\\begin_inset Newline linebreak",
# In multirow cells paragraph breaks were encoded as ERT `\endgraf{}`.
5293 if multirow == True:
5294 endcell = find_token(document.body, "</cell>", begcell)
5295 nl = find_token(document.body, "endgraf{}", begcell, endcell)
5296 if nl == -1 or document.body[nl - 1] != "\\backslash":
5298 ertins = get_containing_inset(document.body, nl)
5299 if ertins and ertins[0] == "ERT":
5300 document.body[ertins[1] : ertins[2] + 1] = [
5303 "\\begin_layout Plain Layout",
# Install the canonical cellvarwidth definition and remove the now-redundant
# varwidth package load that older lyx2lyx versions added.
5313 "% Added by lyx2lyx",
5314 "%% Variable width box for table cells",
5315 r"\newenvironment{cellvarwidth}[1][t]",
5316 r" {\begin{varwidth}[#1]{\linewidth}}",
5317 r" {\@finalstrut\@arstrutbox\end{varwidth}}",
5320 del_complete_lines(document.preamble, ["% Added by lyx2lyx", r"\usepackage{varwidth}"])
# Local layout snippet defining the KOMA `Frontispiece` style; used by the
# convert/revert_koma_frontispiece pair below.
# NOTE(review): the original-line-number gap after 5327 shows the tail of this
# list (remaining entries and the closing bracket) is elided from this view.
5323 frontispiece_def = [
5324 r"### Inserted by lyx2lyx (frontispiece layout) ###",
5325 r"Style Frontispiece",
5326 r" CopyStyle Titlehead",
5327 r" LatexName frontispiece",
# Only KOMA classes (textclass prefix "scr") are affected; the elided line
# after 5334 is presumably an early `return` — TODO confirm.
5332 def convert_koma_frontispiece(document):
5333 """Remove local KOMA frontispiece definition"""
5334 if document.textclass[:3] != "scr":
5337 if document.del_local_layout(frontispiece_def):
# NOTE(review): adding the "ruby" module after deleting a *frontispiece*
# layout looks unrelated — possibly an elision/paste artifact of this
# listing; verify against the full file before relying on it.
5338 document.add_module("ruby")
# Inverse of convert_koma_frontispiece: re-adds the local layout when the
# document actually uses the Frontispiece style.
5341 def revert_koma_frontispiece(document):
5342 """Add local KOMA frontispiece definition"""
5343 if document.textclass[:3] != "scr":
5346 if find_token(document.body, "\\begin_layout Frontispiece", 0) != -1:
5347 document.append_local_layout(frontispiece_def)
# Drops the `\spellchecker_ignore` header line(s); the elided lines between
# 5353 and 5356 presumably hold the `i == -1` loop-exit check — TODO confirm.
5350 def revert_spellchecker_ignore(document):
5351 """Revert document spellchecker dictionary"""
5353 i = find_token(document.header, "\\spellchecker_ignore")
5356 del document.header[i]
# Removes the `\docbook_mathml_prefix` header setting (not understood by
# older formats); elided lines presumably guard the not-found case.
5359 def revert_docbook_mathml_prefix(document):
5360 """Revert the DocBook parameter to choose the prefix for the MathML name space"""
5362 i = find_token(document.header, "\\docbook_mathml_prefix")
5365 del document.header[i]
# Deletes every `\begin_metadata` ... `\end_metadata` section from the header.
5368 def revert_document_metadata(document):
5369 """Revert document metadata"""
5372 i = find_token(document.header, "\\begin_metadata", i)
5375 j = find_end_of(document.header, i, "\\begin_metadata", "\\end_metadata")
5377 # this should not happen
5379 document.header[i : j + 1] = []
# NOTE(review): elided listing (original line numbers jump) — malformed-document
# warning bodies, `continue` lines and several assignments are not visible here.
# Code lines left byte-identical; comments only.
#
# Purpose (from visible code): for every plain Index inset, pull out the new
# 2.4 parameters (range, pageformat) and IndexMacro subinsets (see, seealso,
# subentry x2, sortkey), delete them, and re-emit their content as raw LaTeX
# index syntax (`|see{..}`, `!subentry`, `sortkey@`) in ERT at the appropriate
# positions inside the inset.
5382 def revert_index_macros(document):
5383 "Revert inset index macros"
5387 # trailing blank needed here to exclude IndexMacro insets
5388 i = find_token(document.body, "\\begin_inset Index ", i + 1)
5391 j = find_end_of_inset(document.body, i)
5394 "Malformed LyX document: Can't find end of index inset at line %d" % i
5397 pl = find_token(document.body, "\\begin_layout Plain Layout", i, j)
5400 "Malformed LyX document: Can't find plain layout in index inset at line %d" % i
5403 # find, store and remove inset params
5404 pr = find_token(document.body, "range", i, pl)
5405 prval = get_quoted_value(document.body, "range", pr)
# "start"/"end" map to the LaTeX `|(` / `|)` page-range markers — the
# assignments themselves are on elided lines.
5407 if prval == "start":
5409 elif prval == "end":
5411 pf = find_token(document.body, "pageformat", i, pl)
5412 pageformat = get_quoted_value(document.body, "pageformat", pf)
5413 del document.body[pr : pf + 1]
5414 # Now re-find (potentially moved) inset end again, and search for subinsets
5415 j = find_end_of_inset(document.body, i)
5418 "Malformed LyX document: Can't find end of index inset at line %d" % i
5421 # We search for all possible subentries in turn, store their
5422 # content and delete them
5428 # Two subentries are allowed, thus the duplication
5429 imacros = ["seealso", "see", "subentry", "subentry", "sortkey"]
5430 for imacro in imacros:
5431 iim = find_token(document.body, "\\begin_inset IndexMacro %s" % imacro, i, j)
5434 iime = find_end_of_inset(document.body, iim)
5437 "Malformed LyX document: Can't find end of index macro inset at line %d" % i
5440 iimpl = find_token(document.body, "\\begin_layout Plain Layout", iim, iime)
5443 "Malformed LyX document: Can't find plain layout in index macro inset at line %d"
5447 iimple = find_end_of_layout(document.body, iimpl)
5450 "Malformed LyX document: Can't find end of index macro inset plain layout at line %d"
5454 icont = document.body[iimpl:iimple]
5455 if imacro == "seealso":
5457 elif imacro == "see":
5459 elif imacro == "subentry":
5460 # subentries might have their own sortkey!
5462 document.body, "\\begin_inset IndexMacro sortkey", iimpl, iimple
5465 xiime = find_end_of_inset(document.body, xiim)
5468 "Malformed LyX document: Can't find end of index macro inset at line %d"
5472 xiimpl = find_token(
5473 document.body, "\\begin_layout Plain Layout", xiim, xiime
5477 "Malformed LyX document: Can't find plain layout in index macro inset at line %d"
5481 xiimple = find_end_of_layout(document.body, xiimpl)
5484 "Malformed LyX document: Can't find end of index macro inset plain layout at line %d"
# xicont = nested sortkey content; xxicont (elided assignment) presumably
# collects the subentry text around the nested sortkey inset — TODO confirm.
5489 xicont = document.body[xiimpl + 1 : xiimple]
5490 # everything before the nested sortkey inset, or after it
5492 document.body[iimpl + 1 : xiim]
5493 + document.body[xiime + 1 : iimple]
5495 # construct the latex sequence
5496 icont = xicont + put_cmd_in_ert("@") + xxicont[1:]
5497 if len(subentry) > 0:
5498 if icont[0] == "\\begin_layout Plain Layout":
5499 subentry2 = icont[1:]
5503 if icont[0] == "\\begin_layout Plain Layout":
5504 subentry = icont[1:]
5507 elif imacro == "sortkey":
5509 # Everything stored. Delete subinset.
5510 del document.body[iim : iime + 1]
5511 # Again re-find (potentially moved) index inset end
5512 j = find_end_of_inset(document.body, i)
5515 "Malformed LyX document: Can't find end of index inset at line %d" % i
5518 # Now insert all stuff, starting from the inset end
5519 pl = find_token(document.body, "\\begin_layout Plain Layout", i, j)
5522 "Malformed LyX document: Can't find plain layout in index inset at line %d" % i
5525 ple = find_end_of_layout(document.body, pl)
5528 "Malformed LyX document: Can't find end of index macro inset plain layout at line %d"
# Insertions below go at the layout end (ple); later inserts land *before*
# earlier ones, which yields the required final ordering sortkey@ ... !sub
# ... !sub2 ... |see/seealso/format.
5533 document.body[ple:ple] = (
5534 put_cmd_in_ert("|" + pagerange + "see{") + see + put_cmd_in_ert("}")
5536 elif len(seealso) > 0:
5537 document.body[ple:ple] = (
5538 put_cmd_in_ert("|" + pagerange + "seealso{") + seealso + put_cmd_in_ert("}")
5540 elif pageformat != "default":
5541 document.body[ple:ple] = put_cmd_in_ert("|" + pagerange + pageformat)
5542 if len(subentry2) > 0:
5543 document.body[ple:ple] = put_cmd_in_ert("!") + subentry2
5544 if len(subentry) > 0:
5545 document.body[ple:ple] = put_cmd_in_ert("!") + subentry
5546 if len(sortkey) > 0:
5547 document.body[pl : pl + 1] = document.body[pl:pl] + sortkey + put_cmd_in_ert("@")
# NOTE(review): elided listing — the scanning loop around lines 5606-5613
# clearly runs *before* the decision at 5589 in the real file (it fills cmd,
# ref, nolink/nolinkline); the visible order is an artifact of elision.
#
# Purpose: starred (`nolink "true"`) cross-references are not supported by the
# old format. Without hyperref the `nolink` line is simply dropped; with
# hyperref, non-formatted/labelonly refs are replaced by a literal starred
# LaTeX command (e.g. `\ref*{label}`) in ERT.
5550 def revert_starred_refs(document):
5551 "Revert starred refs"
5552 i = find_token(document.header, "\\use_hyperref true", 0)
5553 use_hyperref = i != -1
5561 i = find_token(document.body, "\\begin_inset CommandInset ref", i)
5565 end = find_end_of_inset(document.body, i)
5568 "Malformed LyX document: Can't find end of inset at line %d" % i
5572 # If we are not using hyperref, then we just need to delete the line
5573 if not use_hyperref:
5574 k = find_token(document.body, "nolink", i, end)
5578 del document.body[k]
5581 # If we are using hyperref, then we'll need to do more.
5585 # so we are in an InsetRef
5588 # If nolink is False, just remove that line
5589 if nolink == False or cmd == "formatted" or cmd == "labelonly":
5590 # document.warning("Skipping " + cmd + " " + ref)
5591 if nolinkline != -1:
5592 del document.body[nolinkline]
5595 # We need to construct a new command and put it in ERT
5596 newcmd = "\\" + cmd + "*{" + ref + "}"
5597 # document.warning(newcmd)
5598 newlines = put_cmd_in_ert(newcmd)
5599 document.body[start : end + 1] = newlines
5600 i += len(newlines) - (end - start) + 1
# Per-line scan of the inset collecting LatexCommand / reference / nolink.
5606 l = document.body[i]
5607 if l.startswith("LatexCommand"):
5609 elif l.startswith("reference"):
5611 elif l.startswith("nolink"):
5613 nolink = tmp == "true"
# Forward conversion: give every ref inset an explicit `nolink "false"` line
# (the 2.4 default). `newlineat` is computed on an elided line — presumably
# just before the inset end — TODO confirm.
5618 def convert_starred_refs(document):
5619 "Convert starred refs"
5622 i = find_token(document.body, "\\begin_inset CommandInset ref", i)
5625 end = find_end_of_inset(document.body, i)
5627 document.warning("Malformed LyX document: Can't find end of inset at line %d" % i)
5631 document.body.insert(newlineat, 'nolink "false"')
# Only applies with non-TeX fonts; resets the header value to "default" and
# reproduces the effect via `\renewcommand{\familydefault}` in the preamble.
5635 def revert_familydefault(document):
5636 "Revert \\font_default_family for non-TeX fonts"
5638 if find_token(document.header, "\\use_non_tex_fonts true", 0) == -1:
5641 i = find_token(document.header, "\\font_default_family", 0)
5643 document.warning("Malformed LyX document: Can't find \\font_default_family header")
5646 dfamily = get_value(document.header, "\\font_default_family", i)
5647 if dfamily == "default":
5650 document.header[i] = "\\font_default_family default"
5651 add_to_preamble(document, ["\\renewcommand{\\familydefault}{\\" + dfamily + "}"])
# Tags hyperlink insets whose target starts with "run:" with `type "other"`.
5654 def convert_hyper_other(document):
5655 'Classify "run:" links as other'
5659 i = find_token(document.body, "\\begin_inset CommandInset href", i)
5662 j = find_end_of_inset(document.body, i)
# BUG(review): `<<` is not string concatenation in Python — if this branch is
# ever reached it raises TypeError instead of warning. Should be `+ str(i)`
# (cf. line 5674 below). Left untouched because the surrounding control flow
# is elided from this listing; fix in the full file.
5664 document.warning("Cannot find end of inset at line " << str(i))
5667 k = find_token(document.body, 'type "', i, j)
5669 # not a "Web" type. Continue.
5672 t = find_token(document.body, "target", i, j)
5674 document.warning("Malformed hyperlink inset at line " + str(i))
# body[t] is `target "..."`; chars 8-11 are the first four of the URL.
5677 if document.body[t][8:12] == "run:":
5678 document.body.insert(t, 'type "other"')
# Inverse of convert_hyper_other: `type "other"` links with a "run:" target
# become plain Web links; all other "other" links are dumped as a raw
# `\href{target}{name}` in ERT.
5682 def revert_hyper_other(document):
5683 'Revert other link type to ERT and "run:" to Web'
5687 i = find_token(document.body, "\\begin_inset CommandInset href", i)
5690 j = find_end_of_inset(document.body, i)
# BUG(review): same `<<` misuse as in convert_hyper_other — raises TypeError
# if reached; should be `+ str(i)`. Left untouched (control flow elided).
5692 document.warning("Cannot find end of inset at line " << str(i))
5695 k = find_token(document.body, 'type "other"', i, j)
5700 n = find_token(document.body, "name", i, j)
5701 t = find_token(document.body, "target", i, j)
5702 if n == -1 or t == -1:
5703 document.warning("Malformed hyperlink inset at line " + str(i))
# Strip the `name "` / `target "` prefixes and the trailing quote.
5706 name = document.body[n][6:-1]
5707 target = document.body[t][8:-1]
5708 if target[:4] == "run:":
5709 del document.body[k]
5711 cmd = r"\href{" + target + "}{" + name + "}"
5712 ecmd = put_cmd_in_ert(cmd)
5713 document.body[i : j + 1] = ecmd
5718 "aa": "Acknowledgments",
5719 "aapaper": "Acknowledgments",
5720 "aastex": "Acknowledgments",
5721 "aastex62": "Acknowledgments",
5722 "achemso": "Acknowledgments",
5723 "acmart": "Acknowledgments",
5724 "AEA": "Acknowledgments",
5725 "apa": "Acknowledgments",
5726 "copernicus": "Acknowledgments",
5727 "egs": "Acknowledgments", # + Acknowledgment
5728 "elsart": "Acknowledgment",
5729 "isprs": "Acknowledgments",
5730 "iucr": "Acknowledgments",
5731 "kluwer": "Acknowledgments",
5732 "svglobal3": "Acknowledgments",
5733 "svglobal": "Acknowledgment",
5734 "svjog": "Acknowledgment",
5735 "svmono": "Acknowledgment",
5736 "svmult": "Acknowledgment",
5737 "svprobth": "Acknowledgment",
5741 "aa": "Acknowledgement",
5742 "aapaper": "Acknowledgement",
5743 "aastex": "Acknowledgement",
5744 "aastex62": "Acknowledgement",
5745 "achemso": "Acknowledgement",
5746 "acmart": "Acknowledgements",
5747 "AEA": "Acknowledgement",
5748 "apa": "Acknowledgements",
5749 "copernicus": "Acknowledgements",
5750 "egs": "Acknowledgements", # + Acknowledgement
5751 "elsart": "Acknowledegment",
5752 "isprs": "Acknowledgements",
5753 "iucr": "Acknowledgements",
5754 "kluwer": "Acknowledgements",
5755 "svglobal3": "Acknowledgements",
5756 "svglobal": "Acknowledgement",
5757 "svjog": "Acknowledgement",
5758 "svmono": "Acknowledgement",
5759 "svmult": "Acknowledgement",
5760 "svprobth": "Acknowledgement",
# Renames the class-specific acknowledgment layout from its old spelling to
# the new one; for egs (which has two styles) a second rename pass follows.
5764 def convert_acknowledgment(document):
5765 "Fix spelling of acknowledgment styles"
5767 if document.textclass not in list(ack_layouts_old.keys()):
5773 document.body, "\\begin_layout " + ack_layouts_old[document.textclass], i
5777 document.body[i] = "\\begin_layout " + ack_layouts_new[document.textclass]
5778 if document.textclass != "egs":
5780 # egs has two styles
5783 i = find_token(document.body, "\\begin_layout Acknowledgement", i)
5786 document.body[i] = "\\begin_layout Acknowledgment"
# Exact mirror of convert_acknowledgment, mapping new spellings back to old.
5789 def revert_acknowledgment(document):
5790 "Restore old spelling of acknowledgment styles"
5792 if document.textclass not in list(ack_layouts_new.keys()):
5797 document.body, "\\begin_layout " + ack_layouts_new[document.textclass], i
5801 document.body[i] = "\\begin_layout " + ack_layouts_old[document.textclass]
5802 if document.textclass != "egs":
5804 # egs has two styles
5807 i = find_token(document.body, "\\begin_layout Acknowledgment", i)
5810 document.body[i] = "\\begin_layout Acknowledgement"
5814 r"### Inserted by lyx2lyx (ams extended theorems) ###",
5815 r"### This requires theorems-ams-extended module to be loaded",
5816 r"Style Acknowledgement",
5817 r" CopyStyle Remark",
5818 r" LatexName acknowledgement",
5819 r' LabelString "Acknowledgement \thetheorem."',
5821 r" \theoremstyle{remark}",
5822 r" \newtheorem{acknowledgement}[thm]{\protect\acknowledgementname}",
5825 r" \providecommand{\acknowledgementname}{_(Acknowledgement)}",
5826 r" EndLangPreamble",
5828 r" \addto\captions$$lang{\renewcommand{\acknowledgementname}{_(Acknowledgement)}}",
5829 r" EndBabelPreamble",
5830 r" DocBookTag para",
5831 r' DocBookAttr role="acknowledgement"',
5832 r' DocBookItemTag ""',
5836 ackStar_theorem_def = [
5837 r"### Inserted by lyx2lyx (ams extended theorems) ###",
5838 r"### This requires a theorems-ams-extended-* module to be loaded",
5839 r"Style Acknowledgement*",
5840 r" CopyStyle Remark*",
5841 r" LatexName acknowledgement*",
5842 r' LabelString "Acknowledgement."',
5844 r" \theoremstyle{remark}",
5845 r" \newtheorem*{acknowledgement*}{\protect\acknowledgementname}",
5848 r" \providecommand{\acknowledgementname}{_(Acknowledgement)}",
5849 r" EndLangPreamble",
5851 r" \addto\captions$$lang{\renewcommand{\acknowledgementname}{_(Acknowledgement)}}",
5852 r" EndBabelPreamble",
5853 r" DocBookTag para",
5854 r' DocBookAttr role="acknowledgement"',
5855 r' DocBookItemTag ""',
5859 ack_bytype_theorem_def = [
5860 r"### Inserted by lyx2lyx (ams extended theorems) ###",
5861 r"### This requires theorems-ams-extended-bytype module to be loaded",
5862 r"Counter acknowledgement",
5863 r" GuiName Acknowledgment",
5865 r"Style Acknowledgement",
5866 r" CopyStyle Remark",
5867 r" LatexName acknowledgement",
5868 r' LabelString "Acknowledgement \theacknowledgement."',
5870 r" \theoremstyle{remark}",
5871 r" \newtheorem{acknowledgement}{\protect\acknowledgementname}",
5874 r" \providecommand{\acknowledgementname}{_(Acknowledgement)}",
5875 r" EndLangPreamble",
5877 r" \addto\captions$$lang{\renewcommand{\acknowledgementname}{_(Acknowledgement)}}",
5878 r" EndBabelPreamble",
5879 r" DocBookTag para",
5880 r' DocBookAttr role="acknowledgement"',
5881 r' DocBookItemTag ""',
5885 ack_chap_bytype_theorem_def = [
5886 r"### Inserted by lyx2lyx (ams extended theorems) ###",
5887 r"### This requires theorems-ams-extended-chap-bytype module to be loaded",
5888 r"Counter acknowledgement",
5889 r" GuiName Acknowledgment",
5892 r"Style Acknowledgement",
5893 r" CopyStyle Remark",
5894 r" LatexName acknowledgement",
5895 r' LabelString "Acknowledgement \theacknowledgement."',
5897 r" \theoremstyle{remark}",
5898 r" \ifx\thechapter\undefined",
5899 r" \newtheorem{acknowledgement}{\protect\acknowledgementname}",
5901 r" \newtheorem{acknowledgement}{\protect\acknowledgementname}[chapter]",
5905 r" \providecommand{\acknowledgementname}{_(Acknowledgement)}",
5906 r" EndLangPreamble",
5908 r" \addto\captions$$lang{\renewcommand{\acknowledgementname}{_(Acknowledgement)}}",
5909 r" EndBabelPreamble",
5910 r" DocBookTag para",
5911 r' DocBookAttr role="acknowledgement"',
5912 r' DocBookItemTag ""',
# For each loaded theorems-ams-extended* module variant, scan the body for
# Acknowledgement / Acknowledgement* layouts and append the matching local
# layout definition(s). haveAck/haveStarAck and the plain-style append guards
# are set on elided lines — presumably flags that each definition is appended
# at most once — TODO confirm against the full file.
5917 def convert_ack_theorems(document):
5918 """Put removed acknowledgement theorems to local layout"""
5922 if "theorems-ams-extended-bytype" in document.get_module_list():
5925 if haveAck and haveStarAck:
5927 i = find_token(document.body, "\\begin_layout Acknowledgement", i)
5930 if document.body[i] == "\\begin_layout Acknowledgement*" and not haveStarAck:
5931 document.append_local_layout(ackStar_theorem_def)
5934 document.append_local_layout(ack_bytype_theorem_def)
5937 elif "theorems-ams-extended-chap-bytype" in document.get_module_list():
5940 if haveAck and haveStarAck:
5942 i = find_token(document.body, "\\begin_layout Acknowledgement", i)
5945 if document.body[i] == "\\begin_layout Acknowledgement*" and not haveStarAck:
5946 document.append_local_layout(ackStar_theorem_def)
5949 document.append_local_layout(ack_chap_bytype_theorem_def)
5952 elif "theorems-ams-extended" in document.get_module_list():
5955 if haveAck and haveStarAck:
5957 i = find_token(document.body, "\\begin_layout Acknowledgement", i)
5960 if document.body[i] == "\\begin_layout Acknowledgement*" and not haveStarAck:
5961 document.append_local_layout(ackStar_theorem_def)
5964 document.append_local_layout(ack_theorem_def)
def revert_ack_theorems(document):
    """Remove acknowledgement theorems from local layout"""
    # Each theorems-ams-extended variant pairs the shared starred definition
    # with its own numbered one. Order matters: the more specific module
    # names are tested before the plain "theorems-ams-extended", mirroring
    # the original if/elif chain, and only the first match is handled.
    variants = (
        ("theorems-ams-extended-bytype", ack_bytype_theorem_def),
        ("theorems-ams-extended-chap-bytype", ack_chap_bytype_theorem_def),
        ("theorems-ams-extended", ack_theorem_def),
    )
    modules = document.get_module_list()
    for module_name, numbered_def in variants:
        if module_name in modules:
            document.del_local_layout(ackStar_theorem_def)
            document.del_local_layout(numbered_def)
            break
# Deletes FormulaMacro insets whose definition line ends in an empty LaTeX
# part (`...}{}` or `...]{}`); the elided lines presumably hold the scan loop
# and a `continue` for non-empty macros — TODO confirm.
5982 def revert_empty_macro(document):
5983 """Remove macros with empty LaTeX part"""
5986 i = find_token(document.body, "\\begin_inset FormulaMacro", i)
5989 cmd = document.body[i + 1]
5990 if cmd[-3:] != "}{}" and cmd[-3:] != "]{}":
5993 j = find_end_of_inset(document.body, i)
5994 document.body[i : j + 1] = []
# Counterpart of revert_empty_macro for the forward direction: rewrites an
# empty `{}` LaTeX part as the visible braces `{\{\}}` so the macro survives.
5997 def convert_empty_macro(document):
5998 """In the unlikely event someone defined a macro with empty LaTeX, add {}"""
6001 i = find_token(document.body, "\\begin_inset FormulaMacro", i)
6004 cmd = document.body[i + 1]
6005 if cmd[-3:] != "}{}" and cmd[-3:] != "]{}":
# Drop the trailing "{}" and substitute literal escaped braces.
6008 newstr = cmd[:-2] + "{\\{\\}}"
6009 document.body[i + 1] = newstr
# covington module update: item:1 arguments of example layouts become item:2,
# and gloss `post:` arguments are renumbered (2->4, 1->2 for two-line glosses;
# 3->6, 2->4, 1->2 for three-line). Renumbering goes highest-first so a shifted
# argument is not matched twice by a later find_token pass.
6013 def convert_cov_options(document):
6014 """Update examples item argument structure"""
6016 if "linguistics" not in document.get_module_list():
6019 layouts = ["Numbered Examples (consecutive)", "Subexample"]
6021 for layout in layouts:
6024 i = find_token(document.body, "\\begin_layout %s" % layout, i)
6027 j = find_end_of_layout(document.body, i)
6030 "Malformed LyX document: Can't find end of example layout at line %d" % i
6034 k = find_token(document.body, "\\begin_inset Argument item:1", i, j)
6036 document.body[k] = "\\begin_inset Argument item:2"
6038 # Shift gloss arguments
6041 i = find_token(document.body, "\\begin_inset Flex Interlinear Gloss (2 Lines)", i)
6044 j = find_end_of_inset(document.body, i)
6047 "Malformed LyX document: Can't find end of gloss inset at line %d" % i
6051 k = find_token(document.body, "\\begin_inset Argument post:2", i, j)
6053 document.body[k] = "\\begin_inset Argument post:4"
6054 k = find_token(document.body, "\\begin_inset Argument post:1", i, j)
6056 document.body[k] = "\\begin_inset Argument post:2"
6061 i = find_token(document.body, "\\begin_inset Flex Interlinear Gloss (3 Lines)", i)
6064 j = find_end_of_inset(document.body, i)
6067 "Malformed LyX document: Can't find end of gloss inset at line %d" % i
6071 k = find_token(document.body, "\\begin_inset Argument post:3", i, j)
6073 document.body[k] = "\\begin_inset Argument post:6"
6074 k = find_token(document.body, "\\begin_inset Argument post:2", i, j)
6076 document.body[k] = "\\begin_inset Argument post:4"
6077 k = find_token(document.body, "\\begin_inset Argument post:1", i, j)
6079 document.body[k] = "\\begin_inset Argument post:2"
# NOTE(review): elided listing — the glosses list opener, loop guards,
# `continue` lines and the `cmd` assignment (presumably "\digloss" /
# "\trigloss" per inset type) are missing from this view. Code lines are left
# byte-identical; comments only.
#
# Purpose: Interlinear Gloss insets that use the new post:1..post:6 arguments
# cannot be represented in the old format; the whole inset is flattened back
# to a raw covington \digloss/\trigloss command in ERT. The six per-argument
# stanzas below are structurally identical: locate the Argument inset, copy
# its Plain Layout content, then delete the inset (plus its wrapping paragraph
# when the paragraph held nothing else).
6083 def revert_linggloss2(document):
6084 "Revert gloss with new args to ERT"
6086 if "linguistics" not in document.get_module_list():
6091 "\\begin_inset Flex Interlinear Gloss (2 Lines)",
6092 "\\begin_inset Flex Interlinear Gloss (3 Lines)",
6094 for glosse in glosses:
6097 i = find_token(document.body, glosse, i + 1)
6100 j = find_end_of_inset(document.body, i)
6102 document.warning("Malformed LyX document: Can't find end of Gloss inset")
6105 # Check if we have new options
6106 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
6108 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
6110 arg = find_token(document.body, "\\begin_inset Argument post:5", i, j)
# Optional argument (Argument 1) of the gloss itself.
6115 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
6116 endarg = find_end_of_inset(document.body, arg)
6119 argbeginPlain = find_token(
6120 document.body, "\\begin_layout Plain Layout", arg, endarg
6122 if argbeginPlain == -1:
6123 document.warning("Malformed LyX document: Can't find optarg plain Layout")
6125 argendPlain = find_end_of_inset(document.body, argbeginPlain)
6126 optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
6128 # remove Arg insets and paragraph, if it only contains this inset
6130 document.body[arg - 1] == "\\begin_layout Plain Layout"
6131 and find_end_of_layout(document.body, arg - 1) == endarg + 3
6133 del document.body[arg - 1 : endarg + 4]
6135 del document.body[arg : endarg + 1]
# post:1 (optional first line annotation).
6137 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
6138 endarg = find_end_of_inset(document.body, arg)
6141 argbeginPlain = find_token(
6142 document.body, "\\begin_layout Plain Layout", arg, endarg
6144 if argbeginPlain == -1:
6145 document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
6147 argendPlain = find_end_of_inset(document.body, argbeginPlain)
6148 marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
6150 # remove Arg insets and paragraph, if it only contains this inset
6152 document.body[arg - 1] == "\\begin_layout Plain Layout"
6153 and find_end_of_layout(document.body, arg - 1) == endarg + 3
6155 del document.body[arg - 1 : endarg + 4]
6157 del document.body[arg : endarg + 1]
# post:2 (mandatory gloss line).
6159 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
6160 endarg = find_end_of_inset(document.body, arg)
6163 argbeginPlain = find_token(
6164 document.body, "\\begin_layout Plain Layout", arg, endarg
6166 if argbeginPlain == -1:
6167 document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
6169 argendPlain = find_end_of_inset(document.body, argbeginPlain)
6170 marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
6172 # remove Arg insets and paragraph, if it only contains this inset
6174 document.body[arg - 1] == "\\begin_layout Plain Layout"
6175 and find_end_of_layout(document.body, arg - 1) == endarg + 3
6177 del document.body[arg - 1 : endarg + 4]
6179 del document.body[arg : endarg + 1]
# post:3 (optional second annotation).
6181 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
6182 endarg = find_end_of_inset(document.body, arg)
6185 argbeginPlain = find_token(
6186 document.body, "\\begin_layout Plain Layout", arg, endarg
6188 if argbeginPlain == -1:
6189 document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
6191 argendPlain = find_end_of_inset(document.body, argbeginPlain)
6192 marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
6194 # remove Arg insets and paragraph, if it only contains this inset
6196 document.body[arg - 1] == "\\begin_layout Plain Layout"
6197 and find_end_of_layout(document.body, arg - 1) == endarg + 3
6199 del document.body[arg - 1 : endarg + 4]
6201 del document.body[arg : endarg + 1]
# post:4 (second mandatory gloss line).
6203 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
6204 endarg = find_end_of_inset(document.body, arg)
6207 argbeginPlain = find_token(
6208 document.body, "\\begin_layout Plain Layout", arg, endarg
6210 if argbeginPlain == -1:
6211 document.warning("Malformed LyX document: Can't find arg 4 plain Layout")
6213 argendPlain = find_end_of_inset(document.body, argbeginPlain)
6214 marg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
6216 # remove Arg insets and paragraph, if it only contains this inset
6218 document.body[arg - 1] == "\\begin_layout Plain Layout"
6219 and find_end_of_layout(document.body, arg - 1) == endarg + 3
6221 del document.body[arg - 1 : endarg + 4]
6223 del document.body[arg : endarg + 1]
# post:5 / post:6 only exist for three-line glosses (\trigloss).
6225 arg = find_token(document.body, "\\begin_inset Argument post:5", i, j)
6226 endarg = find_end_of_inset(document.body, arg)
6229 argbeginPlain = find_token(
6230 document.body, "\\begin_layout Plain Layout", arg, endarg
6232 if argbeginPlain == -1:
6233 document.warning("Malformed LyX document: Can't find arg 5 plain Layout")
6235 argendPlain = find_end_of_inset(document.body, argbeginPlain)
6236 marg5content = document.body[argbeginPlain + 1 : argendPlain - 2]
6238 # remove Arg insets and paragraph, if it only contains this inset
6240 document.body[arg - 1] == "\\begin_layout Plain Layout"
6241 and find_end_of_layout(document.body, arg - 1) == endarg + 3
6243 del document.body[arg - 1 : endarg + 4]
6245 del document.body[arg : endarg + 1]
6247 arg = find_token(document.body, "\\begin_inset Argument post:6", i, j)
6248 endarg = find_end_of_inset(document.body, arg)
6251 argbeginPlain = find_token(
6252 document.body, "\\begin_layout Plain Layout", arg, endarg
6254 if argbeginPlain == -1:
6255 document.warning("Malformed LyX document: Can't find arg 6 plain Layout")
6257 argendPlain = find_end_of_inset(document.body, argbeginPlain)
6258 marg6content = document.body[argbeginPlain + 1 : argendPlain - 2]
6260 # remove Arg insets and paragraph, if it only contains this inset
6262 document.body[arg - 1] == "\\begin_layout Plain Layout"
6263 and find_end_of_layout(document.body, arg - 1) == endarg + 3
6265 del document.body[arg - 1 : endarg + 4]
6267 del document.body[arg : endarg + 1]
# `cmd` is assigned on elided lines — presumably "\digloss" vs "\trigloss"
# depending on the inset type — TODO confirm.
6270 if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
6273 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
6274 endInset = find_end_of_inset(document.body, i)
6275 endPlain = find_end_of_layout(document.body, beginPlain)
6276 precontent = put_cmd_in_ert(cmd)
6277 if len(optargcontent) > 0:
6278 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
6279 precontent += put_cmd_in_ert("{")
6281 postcontent = put_cmd_in_ert("}")
6282 if len(marg1content) > 0:
6283 postcontent += put_cmd_in_ert("[") + marg1content + put_cmd_in_ert("]")
6284 postcontent += put_cmd_in_ert("{") + marg2content + put_cmd_in_ert("}")
6285 if len(marg3content) > 0:
6286 postcontent += put_cmd_in_ert("[") + marg3content + put_cmd_in_ert("]")
6287 postcontent += put_cmd_in_ert("{") + marg4content + put_cmd_in_ert("}")
6288 if cmd == "\\trigloss":
6289 if len(marg5content) > 0:
6290 postcontent += put_cmd_in_ert("[") + marg5content + put_cmd_in_ert("]")
6291 postcontent += put_cmd_in_ert("{") + marg6content + put_cmd_in_ert("}")
6293 document.body[endPlain : endInset + 1] = postcontent
# NOTE(review): the slice [beginPlain + 1 : beginPlain] (start > stop) still
# inserts at beginPlain + 1, but the asymmetric bounds look like an elision
# or typo for [beginPlain + 1 : beginPlain + 1] — verify in the full file.
6294 document.body[beginPlain + 1 : beginPlain] = precontent
6295 del document.body[i : beginPlain + 1]
6297 document.append_local_layout("Requires covington")
6302 def revert_exarg2(document):
6303 "Revert linguistic examples with new arguments to ERT"
6305 if "linguistics" not in document.get_module_list():
6310 layouts = ["Numbered Example", "Subexample"]
6312 for layout in layouts:
6315 i = find_token(document.body, "\\begin_layout %s" % layout, i + 1)
6318 j = find_end_of_layout(document.body, i)
6320 document.warning("Malformed LyX document: Can't find end of example layout")
6322 consecex = document.body[i] == "\\begin_layout Numbered Examples (consecutive)"
6323 subexpl = document.body[i] == "\\begin_layout Subexample"
6324 singleex = document.body[i] == "\\begin_layout Numbered Examples (multiline)"
6325 layouttype = "\\begin_layout Numbered Examples (multiline)"
6327 layouttype = "\\begin_layout Numbered Examples (consecutive)"
6329 layouttype = "\\begin_layout Subexample"
6335 m = find_end_of_layout(document.body, k)
6336 # check for consecutive layouts
6337 k = find_token(document.body, "\\begin_layout", m)
6338 if k == -1 or document.body[k] != layouttype:
6340 l = find_end_of_layout(document.body, k)
6342 document.warning("Malformed LyX document: Can't find end of example layout")
6345 arg = find_token(document.body, "\\begin_inset Argument 1", i, l)
6349 != "\\begin_layout " + get_containing_layout(document.body, arg)[0]
6351 # this is not our argument!
6353 if subexpl or arg == -1:
6354 iarg = find_token(document.body, "\\begin_inset Argument item:1", i, l)
6359 endarg = find_end_of_inset(document.body, arg)
6361 argbeginPlain = find_token(
6362 document.body, "\\begin_layout Plain Layout", arg, endarg
6364 if argbeginPlain == -1:
6365 document.warning("Malformed LyX document: Can't find optarg plain Layout")
6367 argendPlain = find_end_of_inset(document.body, argbeginPlain)
6368 optargcontent = lyx2latex(
6369 document, document.body[argbeginPlain + 1 : argendPlain - 2]
6371 # This is a verbatim argument
6372 optargcontent = re.sub(r"textbackslash{}", r"", optargcontent)
6375 iarg = find_token(document.body, "\\begin_inset Argument item:1", i, j)
6377 endiarg = find_end_of_inset(document.body, iarg)
6379 iargbeginPlain = find_token(
6380 document.body, "\\begin_layout Plain Layout", iarg, endiarg
6382 if iargbeginPlain == -1:
6383 document.warning("Malformed LyX document: Can't find optarg plain Layout")
6385 iargendPlain = find_end_of_inset(document.body, iargbeginPlain)
6387 "<" + lyx2latex(document, document.body[iargbeginPlain:iargendPlain]) + ">"
6390 iarg2 = find_token(document.body, "\\begin_inset Argument item:2", i, j)
6392 endiarg2 = find_end_of_inset(document.body, iarg2)
6394 iarg2beginPlain = find_token(
6395 document.body, "\\begin_layout Plain Layout", iarg2, endiarg2
6397 if iarg2beginPlain == -1:
6398 document.warning("Malformed LyX document: Can't find optarg plain Layout")
6400 iarg2endPlain = find_end_of_inset(document.body, iarg2beginPlain)
6403 + lyx2latex(document, document.body[iarg2beginPlain:iarg2endPlain])
6410 # remove Arg insets and paragraph, if it only contains this inset
6413 document.body[arg - 1] == "\\begin_layout Plain Layout"
6414 and find_end_of_layout(document.body, arg - 1) == endarg + 3
6416 del document.body[arg - 1 : endarg + 4]
6418 del document.body[arg : endarg + 1]
6420 iarg = find_token(document.body, "\\begin_inset Argument item:1", i, j)
6422 document.warning("Unable to re-find item:1 Argument")
6424 endiarg = find_end_of_inset(document.body, iarg)
6426 document.body[iarg - 1] == "\\begin_layout Plain Layout"
6427 and find_end_of_layout(document.body, iarg - 1) == endiarg + 3
6429 del document.body[iarg - 1 : endiarg + 4]
6431 del document.body[iarg : endiarg + 1]
6433 iarg2 = find_token(document.body, "\\begin_inset Argument item:2", i, j)
6435 document.warning("Unable to re-find item:2 Argument")
6437 endiarg2 = find_end_of_inset(document.body, iarg2)
6439 document.body[iarg2 - 1] == "\\begin_layout Plain Layout"
6440 and find_end_of_layout(document.body, iarg2 - 1) == endiarg2 + 3
6442 del document.body[iarg2 - 1 : endiarg2 + 4]
6444 del document.body[iarg2 : endiarg2 + 1]
6448 envname = "examples"
6450 envname = "subexamples"
6452 cmd = put_cmd_in_ert("\\begin{" + envname + "}[" + optargcontent + "]")
6454 # re-find end of layout
6455 j = find_end_of_layout(document.body, i)
6457 document.warning("Malformed LyX document: Can't find end of Subexample layout")
6461 # check for consecutive layouts
6462 k = find_token(document.body, "\\begin_layout", l)
6463 if k == -1 or document.body[k] != layouttype:
6467 m = find_end_of_layout(document.body, k)
6468 iarg = find_token(document.body, "\\begin_inset Argument item:1", k, m)
6470 endiarg = find_end_of_inset(document.body, iarg)
6472 iargbeginPlain = find_token(
6473 document.body, "\\begin_layout Plain Layout", iarg, endiarg
6475 if iargbeginPlain == -1:
6477 "Malformed LyX document: Can't find optarg plain Layout"
6480 iargendPlain = find_end_of_inset(document.body, iargbeginPlain)
6483 + lyx2latex(document, document.body[iargbeginPlain:iargendPlain])
6487 iarg2 = find_token(document.body, "\\begin_inset Argument item:2", k, m)
6489 endiarg2 = find_end_of_inset(document.body, iarg2)
6491 iarg2beginPlain = find_token(
6493 "\\begin_layout Plain Layout",
6497 if iarg2beginPlain == -1:
6499 "Malformed LyX document: Can't find optarg plain Layout"
6502 iarg2endPlain = find_end_of_inset(document.body, iarg2beginPlain)
6505 + lyx2latex(document, document.body[iarg2beginPlain:iarg2endPlain])
6509 if subitemarg == "":
6511 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert(
6512 "\\item" + subitemarg
6514 # Refind and remove arg insets
6516 iarg = find_token(document.body, "\\begin_inset Argument item:1", k, m)
6518 document.warning("Unable to re-find item:1 Argument")
6520 endiarg = find_end_of_inset(document.body, iarg)
6522 document.body[iarg - 1] == "\\begin_layout Plain Layout"
6523 and find_end_of_layout(document.body, iarg - 1) == endiarg + 3
6525 del document.body[iarg - 1 : endiarg + 4]
6527 del document.body[iarg : endiarg + 1]
6529 iarg2 = find_token(document.body, "\\begin_inset Argument item:2", k, m)
6531 document.warning("Unable to re-find item:2 Argument")
6533 endiarg2 = find_end_of_inset(document.body, iarg2)
6535 document.body[iarg2 - 1] == "\\begin_layout Plain Layout"
6536 and find_end_of_layout(document.body, iarg2 - 1) == endiarg2 + 3
6538 del document.body[iarg2 - 1 : endiarg2 + 4]
6540 del document.body[iarg2 : endiarg2 + 1]
6542 document.body[k : k + 1] = ["\\begin_layout Standard"]
6543 l = find_end_of_layout(document.body, k)
6545 document.warning("Malformed LyX document: Can't find end of example layout")
6548 endev = put_cmd_in_ert("\\end{" + envname + "}")
6550 document.body[l:l] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
6551 document.body[i : i + 1] = (
6552 ["\\begin_layout Standard"]
6554 + ["\\end_layout", "", "\\begin_layout Standard"]
6555 + put_cmd_in_ert("\\item" + itemarg)
6558 document.append_local_layout("Requires covington")
def revert_cov_options(document):
    """Revert examples item argument structure.

    Renumbers covington (linguistics module) argument insets back to the
    pre-2.4 scheme: example-layout ``item:2`` -> ``item:1``, and interlinear
    gloss ``post:2/post:4/post:6`` -> ``post:1/post:2/post:3``.

    NOTE(review): interior lines (scan-loop headers, ``== -1`` guards and
    ``document.warning(...)`` call wrappers) appear to be missing from this
    extract — confirm control flow against upstream lyx_2_4.py before editing.
    """
    # Only documents loading the linguistics module carry these insets.
    if "linguistics" not in document.get_module_list():
    layouts = ["Numbered Examples (consecutive)", "Subexample"]
    for layout in layouts:
        # Locate each example layout and the end of its span.
        i = find_token(document.body, "\\begin_layout %s" % layout, i)
        j = find_end_of_layout(document.body, i)
        # Message emitted when the layout has no matching end marker.
        "Malformed LyX document: Can't find end of example layout at line %d" % i
        # Demote the second item argument into the first slot.
        k = find_token(document.body, "\\begin_inset Argument item:2", i, j)
        document.body[k] = "\\begin_inset Argument item:1"
    # Shift gloss arguments
    # Two-line interlinear glosses: post:2 -> post:1, post:4 -> post:2.
    i = find_token(document.body, "\\begin_inset Flex Interlinear Gloss (2 Lines)", i)
    j = find_end_of_inset(document.body, i)
    # Message emitted when the gloss inset has no matching end marker.
    "Malformed LyX document: Can't find end of gloss inset at line %d" % i
    k = find_token(document.body, "\\begin_inset Argument post:2", i, j)
    document.body[k] = "\\begin_inset Argument post:1"
    k = find_token(document.body, "\\begin_inset Argument post:4", i, j)
    document.body[k] = "\\begin_inset Argument post:2"
    # Three-line interlinear glosses: post:2/4/6 -> post:1/2/3.
    i = find_token(document.body, "\\begin_inset Flex Interlinear Gloss (3 Lines)", i)
    j = find_end_of_inset(document.body, i)
    # Message emitted when the gloss inset has no matching end marker.
    "Malformed LyX document: Can't find end of gloss inset at line %d" % i
    k = find_token(document.body, "\\begin_inset Argument post:2", i, j)
    document.body[k] = "\\begin_inset Argument post:1"
    k = find_token(document.body, "\\begin_inset Argument post:4", i, j)
    document.body[k] = "\\begin_inset Argument post:2"
    k = find_token(document.body, "\\begin_inset Argument post:6", i, j)
    document.body[k] = "\\begin_inset Argument post:3"
def revert_expreambles(document):
    """Revert covington example preamble flex insets to ERT"""
    # Each covington flex inset maps to the LaTeX command it reverts to;
    # drive the four reversions from one table instead of repeating the call.
    flex_to_command = (
        ("Example Preamble", "\\expreamble"),
        ("Subexample Preamble", "\\subexpreamble"),
        ("Example Postamble", "\\expostamble"),
        ("Subexample Postamble", "\\subexpostamble"),
    )
    for flex_name, latex_command in flex_to_command:
        revert_flex_inset(document, flex_name, latex_command)
def revert_hequotes(document):
    "Revert Hebrew Quotation marks"
    # NOTE(review): interior lines (the ``!= -1`` guard after the header
    # search and the scan loop over the body) appear to be missing from
    # this extract — verify against upstream lyx_2_4.py.
    # Fall back to the English quote style in the document header.
    i = find_token(document.header, "\\quotes_style hebrew", 0)
    document.header[i] = "\\quotes_style english"
    # Rewrite Hebrew double-quote insets to the closest supported kinds.
    i = find_token(document.body, "\\begin_inset Quotes d")
    if document.body[i] == "\\begin_inset Quotes dld":
        document.body[i] = "\\begin_inset Quotes prd"
    elif document.body[i] == "\\begin_inset Quotes drd":
        document.body[i] = "\\begin_inset Quotes pld"
    elif document.body[i] == "\\begin_inset Quotes dls":
        document.body[i] = "\\begin_inset Quotes prd"
    elif document.body[i] == "\\begin_inset Quotes drs":
        document.body[i] = "\\begin_inset Quotes pld"
def revert_formatted_refs(document):
    """Remove the \\use_formatted_ref line from the document header."""
    i = find_token(document.header, "\\use_formatted_ref", 0)
    # NOTE(review): an ``if i != -1:`` guard appears to be missing from
    # this extract; as written this would raise IndexError on documents
    # without the setting — verify against upstream lyx_2_4.py.
    del document.header[i]
def revert_box_fcolor(document):
    """Rewrite framecolor "default" to "black" in Boxed insets.

    NOTE(review): the scan-loop header and ``== -1`` guards appear to be
    missing from this extract — verify against upstream lyx_2_4.py.
    """
    # Find the next framed box inset (search resumes past the previous hit).
    i = find_token(document.body, "\\begin_inset Box Boxed", i + 1)
    j = find_end_of_inset(document.body, i)
    # Message emitted when the box inset has no matching end marker.
    "Malformed LyX document: Can't find end of framed box inset at line %d" % i
    # Replace the symbolic default frame color with explicit black.
    k = find_token(document.body, 'framecolor "default"', i, j)
    document.body[k] = 'framecolor "black"'
# Conversion/reversion dispatch tables for LyX 2.4 (formats 544-620).
# NOTE(review): the wrapper lines of these tables ("convert = [",
# "revert = [" and their closing brackets) plus a number of entries are
# missing from this extract — verify against upstream lyx_2_4.py.
supported_versions = ["2.4.0", "2.4"]
# Forward conversions: [target_format, [conversion functions]].
    [545, [convert_lst_literalparam]],
    [550, [convert_fontenc]],
    [557, [convert_vcsinfo]],
    [558, [removeFrontMatterStyles]],
    [561, [convert_latexFonts]],  # Handle dejavu, ibmplex fonts in GUI
    [565, [convert_AdobeFonts]],  # Handle adobe fonts in GUI
    [566, [convert_hebrew_parentheses]],
    [572, [convert_notoFonts]],  # Added options thin, light, extralight for Noto
    [573, [convert_inputencoding_namechange]],
    [574, [convert_ruby_module, convert_utf8_japanese]],
    [575, [convert_lineno, convert_aaencoding]],
    [577, [convert_linggloss]],
    [581, [convert_osf]],
            convert_CantarellFont,
    ],  # old font re-converted due to extra options
            convert_NotoRegulars,
            convert_CrimsonProFont,
    [585, [convert_pagesizes]],
    [587, [convert_pagesizenames]],
    [589, [convert_totalheight]],
    [590, [convert_changebars]],
    [591, [convert_postpone_fragile]],
    [593, [convert_counter_maintenance]],
    [596, [convert_parskip]],
    [597, [convert_libertinus_rm_fonts]],
    [601, [convert_math_refs]],
    [602, [convert_branch_colors]],
    [605, [convert_vcolumns2]],
    [606, [convert_koma_frontispiece]],
    [612, [convert_starred_refs]],
    [614, [convert_hyper_other]],
    [615, [convert_acknowledgment, convert_ack_theorems]],
    [616, [convert_empty_macro]],
    [617, [convert_cov_options]],
# Reversions: [target_format, [reversion functions]], newest format first.
    [619, [revert_box_fcolor]],
    [618, [revert_formatted_refs]],
    [617, [revert_hequotes]],
    [616, [revert_expreambles, revert_exarg2, revert_linggloss2, revert_cov_options]],
    [615, [revert_empty_macro]],
    [614, [revert_ack_theorems, revert_acknowledgment]],
    [613, [revert_hyper_other]],
    [612, [revert_familydefault]],
    [611, [revert_starred_refs]],
    [609, [revert_index_macros]],
    [608, [revert_document_metadata]],
    [607, [revert_docbook_mathml_prefix]],
    [606, [revert_spellchecker_ignore]],
    [605, [revert_koma_frontispiece]],
    [604, [revert_vcolumns2]],
    [603, [revert_branch_darkcols]],
    [602, [revert_darkmode_graphics]],
    [601, [revert_branch_colors]],
    [599, [revert_math_refs]],
    # NOTE(review): two consecutive entries target format 598 — likely an
    # extraction artifact; check the step numbering against upstream.
    [598, [revert_hrquotes]],
    [598, [revert_nopagebreak]],
    [597, [revert_docbook_table_output]],
    [596, [revert_libertinus_rm_fonts, revert_libertinus_sftt_fonts]],
    [595, [revert_parskip, revert_line_vspaces]],
    [594, [revert_ams_spaces]],
    [593, [revert_counter_inset]],
    [592, [revert_counter_maintenance]],
    [591, [revert_colrow_tracking]],
    [590, [revert_postpone_fragile]],
    [589, [revert_changebars]],
    [588, [revert_totalheight]],
    [587, [revert_memoir_endnotes, revert_enotez, revert_theendnotes]],
    [586, [revert_pagesizenames]],
    [585, [revert_dupqualicites]],
    [584, [revert_pagesizes, revert_komafontsizes]],
    [583, [revert_vcsinfo_rev_abbrev]],
    [582, [revert_ChivoFont, revert_CrimsonProFont]],
    [581, [revert_CantarellFont, revert_FiraFont]],
    [580, [revert_texfontopts, revert_osf]],
            revert_plainNotoFonts_xopts,
            revert_notoFonts_xopts,
            revert_IBMFonts_xopts,
            revert_AdobeFonts_xopts,
    ],  # keep revert_font_opts last!
    [578, [revert_babelfont]],
    [577, [revert_drs]],
    [576, [revert_linggloss, revert_subexarg]],
    [575, [revert_new_languages]],
    [574, [revert_lineno, revert_aaencoding]],
    [573, [revert_ruby_module, revert_utf8_japanese]],
    [572, [revert_inputencoding_namechange]],
    [571, [revert_notoFonts]],
    [570, [revert_cmidruletrimming]],
    [569, [revert_bibfileencodings]],
    [568, [revert_tablestyle]],
    [567, [revert_soul]],
    [566, [revert_malayalam]],
    [565, [revert_hebrew_parentheses]],
    [564, [revert_AdobeFonts]],
    [563, [revert_lformatinfo]],
    [562, [revert_listpargs]],
    [561, [revert_l7ninfo]],
    [560, [revert_latexFonts]],  # Handle dejavu, ibmplex fonts in user preamble
    [559, [revert_timeinfo, revert_namenoextinfo]],
    [558, [revert_dateinfo]],
    [557, [addFrontMatterStyles]],
    [556, [revert_vcsinfo]],
    [555, [revert_bibencoding]],
    [554, [revert_vcolumns]],
    [553, [revert_stretchcolumn]],
    [552, [revert_tuftecite]],
    [551, [revert_floatpclass, revert_floatalignment]],
    [550, [revert_nospellcheck]],
    [549, [revert_fontenc]],
    [548, []],  # dummy format change
    [547, [revert_lscape]],
    [546, [revert_xcharter]],
    [545, [revert_paratype]],
    [544, [revert_lst_literalparam]],
6879 if __name__ == "__main__":