1 # This file is part of lyx2lyx
2 # Copyright (C) 2018 The LyX team
4 # This program is free software; you can redistribute it and/or
5 # modify it under the terms of the GNU General Public License
6 # as published by the Free Software Foundation; either version 2
7 # of the License, or (at your option) any later version.
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
14 # You should have received a copy of the GNU General Public License
15 # along with this program; if not, write to the Free Software
16 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 """Convert files to the file format generated by lyx 2.4"""
22 from datetime import datetime, date, time
24 from parser_tools import (
37 get_containing_layout,
44 from lyx2lyx_tools import (
54 ####################################################################
55 # Private helper functions
def add_preamble_fonts(document, fontmap):
    """Add collected font packages with their options to the user preamble.

    fontmap maps a LaTeX package name to the list of option strings that
    were collected for it; each entry becomes one \\usepackage line.
    """
    # Fix: the visible code referenced `pkg`/`xoption` as free variables —
    # the per-package loop and the empty-options branch were missing.
    for pkg in fontmap:
        if len(fontmap[pkg]) > 0:
            xoption = "[" + ",".join(fontmap[pkg]) + "]"
        else:
            # No options collected: emit \usepackage without an optional arg
            xoption = ""
        preamble = f"\\usepackage{xoption}{{{pkg}}}"
        add_to_preamble(document, [preamble])
def createkey(pkg, options):
    """Build the lookup key '<package>:<opt1>-<opt2>-...' used by the font maps."""
    return f"{pkg}:{'-'.join(options)}"
        # NOTE(review): fragment of class `fontinfo` — the enclosing
        # `class`/`def __init__` lines are not visible in this chunk.
        self.fontname = None  # key into font2pkgmap
        self.fonttype = None  # roman,sans,typewriter,math
        self.scaletype = None  # None,sf,tt
        self.scaleopt = None  # None, 'scaled', 'scale'
        self.pkgkey = None  # key into pkg2fontmap
        self.osfopt = None  # None, string
        self.osfdef = "false"  # "false" or "true"
        # NOTE(review): the line below presumably belongs to a separate
        # addkey() method that refreshes pkgkey from package+options — confirm.
        self.pkgkey = createkey(self.package, self.options)
        # Lookup tables kept in sync by expandFontMapping():
        self.font2pkgmap = dict()  # fontname -> fontinfo
        self.pkg2fontmap = dict()  # pkgkey (package:options) -> fontname
        self.pkginmap = dict()  # defines, if a map for package exists
    def expandFontMapping(
        # NOTE(review): parameter list and several lines (e.g. the fontlist
        # loop header, fe = fontinfo(), option splitting) are elided in this
        # chunk; code below is kept as-is.
        """Expand fontinfo mapping"""
        # fontlist: list of fontnames, each element
        #           may contain a ','-separated list of needed options
        #           like e.g. 'IBMPlexSansCondensed,condensed'
        # font_type: one of 'roman', 'sans', 'typewriter', 'math'
        # scale_type: one of None, 'sf', 'tt'
        # pkg: package defining the font. Defaults to fontname if None
        # scaleopt: one of None, 'scale', 'scaled', or some other string
        #           to be used in scale option (e.g. scaled=0.7)
        # osfopt: None or some other string to be used in osf option
        # osfdef: "true" if osf is default
        fe.fonttype = font_type
        fe.scaletype = scale_type
        fe.fontname = font_name
        fe.scaleopt = scaleopt
        fe.package = font_name
        # Register the entry in both directions (font -> pkg, pkg -> font).
        self.font2pkgmap[font_name] = fe
        if fe.pkgkey in self.pkg2fontmap:
            # Repeated the same entry? Check content
            if self.pkg2fontmap[fe.pkgkey] != font_name:
                document.error("Something is wrong in pkgname+options <-> fontname mapping")
        self.pkg2fontmap[fe.pkgkey] = font_name
        self.pkginmap[fe.package] = 1
    def getfontname(self, pkg, options):
        """Return the native font name for a package+options combination.

        NOTE(review): the early-return bodies (presumably `return None`)
        and the final return are elided in this chunk.
        """
        pkgkey = createkey(pkg, options)
        if not pkgkey in self.pkg2fontmap:
        fontname = self.pkg2fontmap[pkgkey]
        if not fontname in self.font2pkgmap:
            document.error("Something is wrong in pkgname+options <-> fontname mapping")
        # Sanity: the round-trip key must match the registered entry.
        if pkgkey == self.font2pkgmap[fontname].pkgkey:
def createFontMapping(fontlist):
    # Create info for known fonts for the use in
    #   convert_latexFonts() and
    #   revert_latexFonts()
    #
    # * Would be more handy to parse latexFonts file,
    #   but the path to this file is unknown
    # * For now, add DejaVu and IBMPlex only.
    # * Expand, if desired
    #
    # NOTE(review): `fm = fontmapping()`, the per-font `if font == "...":`
    # dispatch lines, several closing parentheses and the final `return fm`
    # are elided in this chunk; visible calls kept as-is.
    for font in fontlist:
        fm.expandFontMapping(["DejaVuSerif", "DejaVuSerifCondensed"], "roman", None, None)
        fm.expandFontMapping(
            ["DejaVuSans", "DejaVuSansCondensed"], "sans", "sf", None, "scaled"
        fm.expandFontMapping(["DejaVuSansMono"], "typewriter", "tt", None, "scaled")
        fm.expandFontMapping(
                "IBMPlexSerifThin,thin",
                "IBMPlexSerifExtraLight,extralight",
                "IBMPlexSerifLight,light",
                "IBMPlexSerifSemibold,semibold",
        fm.expandFontMapping(
                "IBMPlexSansCondensed,condensed",
                "IBMPlexSansThin,thin",
                "IBMPlexSansExtraLight,extralight",
                "IBMPlexSansLight,light",
                "IBMPlexSansSemibold,semibold",
        fm.expandFontMapping(
                "IBMPlexMonoThin,thin",
                "IBMPlexMonoExtraLight,extralight",
                "IBMPlexMonoLight,light",
                "IBMPlexMonoSemibold,semibold",
        elif font == "Adobe":
            fm.expandFontMapping(
                ["ADOBESourceSerifPro"], "roman", None, "sourceserifpro", None, "osf"
            fm.expandFontMapping(
                ["ADOBESourceSansPro"], "sans", "sf", "sourcesanspro", "scaled", "osf"
            fm.expandFontMapping(
                ["ADOBESourceCodePro"],
            fm.expandFontMapping(
                    "NotoSerifRegular,regular",
                    "NotoSerifMedium,medium",
                    "NotoSerifThin,thin",
                    "NotoSerifLight,light",
                    "NotoSerifExtralight,extralight",
            fm.expandFontMapping(
                    "NotoSansRegular,regular",
                    "NotoSansMedium,medium",
                    "NotoSansLight,light",
                    "NotoSansExtralight,extralight",
            fm.expandFontMapping(
                ["NotoMonoRegular,regular"], "typewriter", "tt", "noto-mono", "scaled"
        elif font == "Cantarell":
            fm.expandFontMapping(
                ["cantarell,defaultsans"],
        elif font == "Chivo":
            fm.expandFontMapping(
                    "ChivoMedium,medium",
        elif font == "CrimsonPro":
            fm.expandFontMapping(
                    "CrimsonProExtraLight,extralight",
                    "CrimsonProLight,light",
                    "CrimsonProMedium,medium",
            fm.expandFontMapping(
                    "FiraSansLight,light",
                    "FiraSansExtralight,extralight",
                    "FiraSansUltralight,ultralight",
            fm.expandFontMapping(
                ["FiraMono"], "typewriter", "tt", "FiraMono", "scaled", "lf", "true"
        elif font == "libertinus":
            fm.expandFontMapping(["libertinus,serif"], "roman", None, "libertinus", None, "osf")
            fm.expandFontMapping(
                ["libertinusmath"], "math", None, "libertinust1math", None, None
def convert_fonts(document, fm, osfoption="osf"):
    """Handle font definition (LaTeX preamble -> native)

    Scans the preamble for \\usepackage lines matching a known font package
    (per fontmapping fm), removes them, and sets the corresponding native
    \\font_* header entries (name, scale, osf, options).

    NOTE(review): the outer scan loop (`i = -1; while True:`), several branch
    bodies, `continue`/`break` lines and intermediate assignments (pkg, o,
    opt, oscale, has_osf, vals) are elided in this chunk; visible code kept
    as-is.
    """
    rpkg = re.compile(r"^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}")
    rscaleopt = re.compile(r"^scaled?=(.*)")
    # Check whether we go beyond font option feature introduction
    haveFontOpts = document.end_format > 580
    i = find_re(document.preamble, rpkg, i + 1)
    mo = rpkg.search(document.preamble[i])
    if mo == None or mo.group(2) == None:
    # Normalize and split the option list of the matched \usepackage line.
    options = mo.group(2).replace(" ", "").split(",")
    while o < len(options):
        if options[o] == osfoption:
        mo = rscaleopt.search(options[o])
    if not pkg in fm.pkginmap:
    # Try with name-option combination first
    # (only one default option supported currently)
    while o < len(options):
        fn = fm.getfontname(pkg, [opt])
    fn = fm.getfontname(pkg, [])
    fn = fm.getfontname(pkg, options)
    # Matched a known font package: drop the preamble line, use native info.
    del document.preamble[i]
    fontinfo = fm.font2pkgmap[fn]
    if fontinfo.scaletype == None:
    fontscale = "\\font_" + fontinfo.scaletype + "_scale"
    fontinfo.scaleval = oscale
    if (has_osf and fontinfo.osfdef == "false") or (
        not has_osf and fontinfo.osfdef == "true"
    if fontinfo.osfopt == None:
        options.extend(osfoption)
    osf = find_token(document.header, "\\font_osf false")
    osftag = "\\font_osf"
    if osf == -1 and fontinfo.fonttype != "math":
        # Try with newer format
        osftag = "\\font_" + fontinfo.fonttype + "_osf"
        osf = find_token(document.header, osftag + " false")
    document.header[osf] = osftag + " true"
    # lyx2lyx marks lines it added; remove the marker with the package line.
    if i > 0 and document.preamble[i - 1] == "% Added by lyx2lyx":
        del document.preamble[i - 1]
    if fontscale != None:
        j = find_token(document.header, fontscale, 0)
        val = get_value(document.header, fontscale, j)
        scale = "%03d" % int(float(oscale) * 100)
        document.header[j] = fontscale + " " + scale + " " + vals[1]
    ft = "\\font_" + fontinfo.fonttype
    j = find_token(document.header, ft, 0)
    val = get_value(document.header, ft, j)
    words = val.split()  # ! splits also values like '"DejaVu Sans"'
    words[0] = '"' + fn + '"'
    document.header[j] = ft + " " + " ".join(words)
    if haveFontOpts and fontinfo.fonttype != "math":
        fotag = "\\font_" + fontinfo.fonttype + "_opts"
        fo = find_token(document.header, fotag)
        document.header[fo] = fotag + ' "' + ",".join(options) + '"'
        # Sensible place to insert tag
        fo = find_token(document.header, "\\font_sf_scale")
        document.warning("Malformed LyX document! Missing \\font_sf_scale")
        document.header.insert(fo, fotag + ' "' + ",".join(options) + '"')
def revert_fonts(document, fm, fontmap, OnlyWithXOpts=False, WithXOpts=False):
    """Revert native font definition to LaTeX"""
    # fonlist := list of fonts created from the same package
    # Empty package means that the font-name is the same as the package-name
    # fontmap (key = package, val += found options) will be filled
    # and used later in add_preamble_fonts() to be added to user-preamble
    #
    # NOTE(review): the loop initialization (`i = -1`), several branch
    # bodies, `continue` lines, the fontmap[val] = [] initialization and the
    # final return are elided in this chunk; visible code kept as-is.
    rfontscale = re.compile(r"^\s*(\\font_(roman|sans|typewriter|math))\s+")
    rscales = re.compile(r"^\s*(\d+)\s+(\d+)")
    while i < len(document.header):
        i = find_re(document.header, rfontscale, i + 1)
        mo = rfontscale.search(document.header[i])
        ft = mo.group(1)  # 'roman', 'sans', 'typewriter', 'math'
        val = get_value(document.header, ft, i)
        words = val.split(" ")  # ! splits also values like '"DejaVu Sans"'
        font = words[0].strip('"')  # TeX font name has no whitespace
        if not font in fm.font2pkgmap:
        fontinfo = fm.font2pkgmap[font]
        val = fontinfo.package
        if not val in fontmap:
        if OnlyWithXOpts or WithXOpts:
            if ft == "\\font_math":
            regexp = re.compile(r"^\s*(\\font_roman_opts)\s+")
            if ft == "\\font_sans":
                regexp = re.compile(r"^\s*(\\font_sans_opts)\s+")
            elif ft == "\\font_typewriter":
                regexp = re.compile(r"^\s*(\\font_typewriter_opts)\s+")
            x = find_re(document.header, regexp, 0)
            if x == -1 and OnlyWithXOpts:
            # We need to use this regex since split() does not handle quote protection
            xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
            opts = xopts[1].strip('"').split(",")
            fontmap[val].extend(opts)
            del document.header[x]
        # Reset the header entry to the default font.
        words[0] = '"default"'
        document.header[i] = ft + " " + " ".join(words)
        if fontinfo.scaleopt != None:
            xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
            mo = rscales.search(xval)
            # set correct scale option
            [fontinfo.scaleopt + "=" + format(float(xval1) / 100, ".2f")]
        if fontinfo.osfopt != None:
            if fontinfo.osfdef == "true":
            osf = find_token(document.header, "\\font_osf " + oldval)
            if osf == -1 and ft != "\\font_math":
                # Try with newer format
                osftag = "\\font_roman_osf " + oldval
                if ft == "\\font_sans":
                    osftag = "\\font_sans_osf " + oldval
                elif ft == "\\font_typewriter":
                    osftag = "\\font_typewriter_osf " + oldval
                osf = find_token(document.header, osftag)
            fontmap[val].extend([fontinfo.osfopt])
        if len(fontinfo.options) > 0:
            fontmap[val].extend(fontinfo.options)
516 ###############################################################################
518 ### Conversion and reversion routines
520 ###############################################################################
def convert_inputencoding_namechange(document):
    """Rename inputencoding settings.

    'auto' -> 'auto-legacy', 'default' -> 'auto-legacy-plain'.
    """
    i = find_token(document.header, "\\inputencoding", 0)
    # Fix: guard the not-found case; without it, i == -1 would silently
    # rewrite the LAST header line instead of the \inputencoding line.
    if i == -1:
        return
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
    """Rename inputencoding settings.

    'auto-legacy-plain' -> 'default', 'auto-legacy' -> 'auto'
    (inverse of convert_inputencoding_namechange).
    """
    i = find_token(document.header, "\\inputencoding", 0)
    # Fix: guard the not-found case; i == -1 would index the last header line.
    if i == -1:
        return
    # Replace the longer name first so 'auto-legacy' is not left half-renamed.
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")
def convert_notoFonts(document):
    """Handle Noto fonts definition to LaTeX"""
    # Nothing to do when the document uses non-TeX (system) fonts.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    fm = createFontMapping(["Noto"])
    convert_fonts(document, fm)
def revert_notoFonts(document):
    """Revert native Noto font definition to LaTeX"""

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Fix: fontmap must be initialized before revert_fonts() fills it;
        # the visible code used it unbound (NameError at runtime).
        fontmap = dict()
        fm = createFontMapping(["Noto"])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    """Handle DejaVu and IBMPlex fonts definition to LaTeX"""
    # Only relevant for documents compiled with TeX fonts.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    fm = createFontMapping(["DejaVu", "IBM"])
    convert_fonts(document, fm)
def revert_latexFonts(document):
    """Revert native DejaVu font definition to LaTeX"""

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Fix: initialize fontmap (package -> options) before revert_fonts()
        # fills it; the visible code used it unbound (NameError at runtime).
        fontmap = dict()
        fm = createFontMapping(["DejaVu", "IBM"])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    """Handle Adobe Source fonts definition to LaTeX"""
    # Only relevant for documents compiled with TeX fonts.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    fm = createFontMapping(["Adobe"])
    convert_fonts(document, fm)
def revert_AdobeFonts(document):
    """Revert Adobe Source font definition to LaTeX"""

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Fix: initialize fontmap (package -> options) before revert_fonts()
        # fills it; the visible code used it unbound (NameError at runtime).
        fontmap = dict()
        fm = createFontMapping(["Adobe"])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def removeFrontMatterStyles(document):
    """Remove styles Begin/EndFrontmatter

    NOTE(review): the scan-loop scaffolding (`i = 0; while True:`, the
    `i == -1: return` break, `continue` lines and the blank-line counter)
    is elided in this chunk; visible code kept as-is.
    """

    layouts = ["BeginFrontmatter", "EndFrontmatter"]
    tokenend = len("\\begin_layout ")
    i = find_token_exact(document.body, "\\begin_layout ", i + 1)
    # Extract the layout name following the token.
    layout = document.body[i][tokenend:].strip()
    if layout not in layouts:
    j = find_end_of_layout(document.body, i)
    document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
    # Also swallow trailing blank lines after the layout.
    while document.body[j + 1].strip() == "":
    document.body[i : j + 1] = []
def addFrontMatterStyles(document):
    """Use styles Begin/EndFrontmatter for elsarticle

    NOTE(review): large parts of this routine (the early return body, the
    note text inserted between Begin/EndFrontmatter, `layouts`/`first`
    bookkeeping and loop scaffolding) are elided in this chunk; visible
    code kept as-is.
    """

    if document.textclass != "elsarticle":

    def insertFrontmatter(prefix, line):
        # Expand the insertion window over surrounding blank lines.
        while above > 0 and document.body[above - 1].strip() == "":
        while document.body[below].strip() == "":
        document.body[above:below] = [
            "\\begin_layout " + prefix + "Frontmatter",
            "\\begin_inset Note Note",
            "\\begin_layout Plain Layout",
            "Corresponding author",
    tokenend = len("\\begin_layout ")
    i = find_token_exact(document.body, "\\begin_layout ", i + 1)
    layout = document.body[i][tokenend:].strip()
    if layout not in layouts:
    k = find_end_of_layout(document.body, i)
    document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
    # Insert End after the last frontmatter layout, Begin before the first.
    insertFrontmatter("End", k + 1)
    insertFrontmatter("Begin", first)
def convert_lst_literalparam(document):
    """Add param literal to include inset

    NOTE(review): loop scaffolding (`i = 0; while True:`, break/continue
    lines) and the warning call wrapping the message string are elided in
    this chunk; visible code kept as-is.
    """
    i = find_token(document.body, "\\begin_inset CommandInset include", i + 1)
    j = find_end_of_inset(document.body, i)
    "Malformed LyX document: Can't find end of command inset at line %d" % i
    # Skip to the end of the parameter block before inserting.
    while i < j and document.body[i].strip() != "":
    document.body.insert(i, 'literal "true"')
def revert_lst_literalparam(document):
    """Remove param literal from include inset

    NOTE(review): loop scaffolding and the warning call wrapping the
    message string are elided in this chunk; visible code kept as-is.
    """
    i = find_token(document.body, "\\begin_inset CommandInset include", i + 1)
    j = find_end_of_inset(document.body, i)
    "Malformed LyX document: Can't find end of include inset at line %d" % i
    # Drop the 'literal' parameter line inside the inset, if present.
    del_token(document.body, "literal", i, j)
def revert_paratype(document):
    """Revert ParaType font definitions to LaTeX

    NOTE(review): several lines (early returns, sfoption/ttoption
    defaults, try/except around float(), and intermediate branches) are
    elided in this chunk; visible code kept as-is.
    """

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        i1 = find_token(document.header, '\\font_roman "PTSerif-TLF"', 0)
        i2 = find_token(document.header, '\\font_sans "default"', 0)
        i3 = find_token(document.header, '\\font_typewriter "default"', 0)
        j = find_token(document.header, '\\font_sans "PTSans-TLF"', 0)
        sfval = find_token(document.header, "\\font_sf_scale", 0)
        document.warning("Malformed LyX document: Missing \\font_sf_scale.")
        # sf_scale is a pair "<gui> <output>"; only the output value matters.
        sfscale = document.header[sfval].split()
        document.header[sfval] = " ".join(sfscale)
        sf_scale = float(val)
        document.warning("Invalid font_sf_scale value: " + val)
        if sf_scale != "100.0":
            sfoption = "scaled=" + str(sf_scale / 100.0)
        k = find_token(document.header, '\\font_typewriter "PTMono-TLF"', 0)
        ttval = get_value(document.header, "\\font_tt_scale", 0)
        ttoption = "scaled=" + format(float(ttval) / 100, ".2f")
        # All three families set: use the umbrella paratype package.
        if i1 != -1 and i2 != -1 and i3 != -1:
            add_to_preamble(document, ["\\usepackage{paratype}"])
        add_to_preamble(document, ["\\usepackage{PTSerif}"])
        document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
        add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
        add_to_preamble(document, ["\\usepackage{PTSans}"])
        document.header[j] = document.header[j].replace("PTSans-TLF", "default")
        add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
        add_to_preamble(document, ["\\usepackage{PTMono}"])
        document.header[k] = document.header[k].replace("PTMono-TLF", "default")
def revert_xcharter(document):
    """Revert XCharter font definitions to LaTeX

    NOTE(review): the `i == -1` early return, the `options` construction
    from the osf setting, and related branch bodies are elided in this
    chunk; visible code kept as-is.
    """

    i = find_token(document.header, '\\font_roman "xcharter"', 0)
    # replace unsupported font setting
    document.header[i] = document.header[i].replace("xcharter", "default")
    # no need for preamble code with system fonts
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
    # transfer old style figures setting to package options
    j = find_token(document.header, "\\font_osf true")
    document.header[j] = "\\font_osf false"
    add_to_preamble(document, ["\\usepackage%s{XCharter}" % options])
def revert_lscape(document):
    """Reverts the landscape environment (Landscape module) to TeX-code

    NOTE(review): loop scaffolding (`i = 0; while True:`, break lines) and
    the else-branch header for the non-floating case are elided in this
    chunk; visible code kept as-is.
    """

    if not "landscape" in document.get_module_list():
    i = find_token(document.body, "\\begin_inset Flex Landscape", i + 1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of Landscape inset")
    if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
        # Floating variant wraps the landscape in \afterpage{...}.
        document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
        document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
        add_to_preamble(document, ["\\usepackage{afterpage}"])
    document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
    document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
    add_to_preamble(document, ["\\usepackage{pdflscape}"])
    document.del_module("landscape")
def convert_fontenc(document):
    """Convert default fontenc setting ('global' -> 'auto')."""
    i = find_token(document.header, "\\fontencoding global", 0)
    # Fix: guard the not-found case; without it, i == -1 would rewrite the
    # last header line instead of the \fontencoding line.
    if i == -1:
        return
    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    """Revert default fontenc setting ('auto' -> 'global')."""
    i = find_token(document.header, "\\fontencoding auto", 0)
    # Fix: guard the not-found case; i == -1 would index the last header line.
    if i == -1:
        return
    document.header[i] = document.header[i].replace("auto", "global")
def revert_nospellcheck(document):
    """Remove nospellcheck font info param

    NOTE(review): the loop wrapping this find_token call (including the
    initialization of `i` and the deletion of the matched line) is elided
    in this chunk; visible code kept as-is.
    """
    i = find_token(document.body, "\\nospellcheck", i)
def revert_floatpclass(document):
    """Remove float placement params 'document' and 'class'

    NOTE(review): loop scaffolding and the deletion of the matched
    placement lines are elided in this chunk; visible code kept as-is.
    """

    del_token(document.header, "\\float_placement class")
    i = find_token(document.body, "\\begin_inset Float", i + 1)
    j = find_end_of_inset(document.body, i)
    # Look for both forms of the per-float placement parameter.
    k = find_token(document.body, "placement class", i, j)
    k = find_token(document.body, "placement document", i, j)
def revert_floatalignment(document):
    """Remove float alignment params

    NOTE(review): loop scaffolding, early-`continue` branches, the warning
    call wrapping the message string, and the deletion of the alignment
    line are elided in this chunk; visible code kept as-is.
    """

    # Document-wide default alignment (header param is removed here).
    galignment = get_value(document.header, "\\float_alignment", delete=True)
    i = find_token(document.body, "\\begin_inset Float", i + 1)
    j = find_end_of_inset(document.body, i)
    "Malformed LyX document: Can't find end of inset at line " + str(i)
    k = find_token(document.body, "alignment", i, j)
    alignment = get_value(document.body, "alignment", k)
    if alignment == "document":
        alignment = galignment
    l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
    document.warning("Can't find float layout!")
    # Emit the corresponding LaTeX alignment command as ERT.
    if alignment == "left":
        alcmd = put_cmd_in_ert("\\raggedright{}")
    elif alignment == "center":
        alcmd = put_cmd_in_ert("\\centering{}")
    elif alignment == "right":
        alcmd = put_cmd_in_ert("\\raggedleft{}")
    document.body[l + 1 : l + 1] = alcmd
    # There might be subfloats, so we do not want to move past
    # the end of the inset.
def revert_tuftecite(document):
    r"""Revert \cite commands in tufte classes

    NOTE(review): loop scaffolding, `continue` lines, the cmd filter and
    the `res` initialization (presumably "\\" + cmd) are elided in this
    chunk; visible code kept as-is.
    """

    tufte = ["tufte-book", "tufte-handout"]
    if document.textclass not in tufte:
    i = find_token(document.body, "\\begin_inset CommandInset citation", i + 1)
    j = find_end_of_inset(document.body, i)
    document.warning("Can't find end of citation inset at line %d!!" % (i))
    k = find_token(document.body, "LatexCommand", i, j)
    document.warning("Can't find LatexCommand for citation inset at line %d!" % (i))
    cmd = get_value(document.body, "LatexCommand", k)
    pre = get_quoted_value(document.body, "before", i, j)
    post = get_quoted_value(document.body, "after", i, j)
    key = get_quoted_value(document.body, "key", i, j)
    document.warning("Citation inset at line %d does not have a key!" % (i))
    # Replace command with ERT
    res += "[" + pre + "]"
    res += "[" + post + "]"
    res += "{" + key + "}"
    document.body[i : j + 1] = put_cmd_in_ert([res])
def revert_stretchcolumn(document):
    """We remove the column varwidth flags or everything else will become a mess.

    NOTE(review): loop scaffolding (`i = 0; while True:`, return/continue
    lines) is elided in this chunk; visible code kept as-is.
    """
    i = find_token(document.body, "\\begin_inset Tabular", i + 1)
    j = find_end_of_inset(document.body, i + 1)
    document.warning("Malformed LyX document: Could not find end of tabular.")
    # Strip varwidth="true" from every column definition in this table.
    for k in range(i, j):
        if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
            document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
            document.body[k] = document.body[k].replace(' varwidth="true"', "")
def revert_vcolumns(document):
    """Revert standard columns with line breaks etc.

    NOTE(review): substantial scaffolding (loop headers, needarray /
    needvarwidth flags' initialization, the `vval = ""` default, the
    begcell bookkeeping and several condition headers) is elided in this
    chunk; visible code kept as-is.
    """
    i = find_token(document.body, "\\begin_inset Tabular", i + 1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Could not find end of tabular.")
    # Collect necessary column information
    nrows = int(document.body[i + 1].split('"')[3])
    ncols = int(document.body[i + 1].split('"')[5])
    for k in range(ncols):
        m = find_token(document.body, "<column", m)
        width = get_option_value(document.body[m], "width")
        varwidth = get_option_value(document.body[m], "varwidth")
        alignment = get_option_value(document.body[m], "alignment")
        special = get_option_value(document.body[m], "special")
        col_info.append([width, varwidth, alignment, special, m])
    for row in range(nrows):
        for col in range(ncols):
            m = find_token(document.body, "<cell", m)
            multicolumn = get_option_value(document.body[m], "multicolumn")
            multirow = get_option_value(document.body[m], "multirow")
            width = get_option_value(document.body[m], "width")
            rotate = get_option_value(document.body[m], "rotate")
            # Check for: linebreaks, multipars, non-standard environments
            endcell = find_token(document.body, "</cell>", begcell)
            find_token(document.body, "\\begin_inset Newline", begcell, endcell)
            elif count_pars_in_inset(document.body, begcell + 2) > 1:
            elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
            and ((multicolumn == "" and multirow == "") or width == "")
            col_info[col][0] == ""
            and col_info[col][1] == ""
            and col_info[col][3] == ""
            alignment = col_info[col][2]
            col_line = col_info[col][4]
            # Map LyX alignment to an array-package column prefix.
            if alignment == "center":
                vval = ">{\\centering}"
            elif alignment == "left":
                vval = ">{\\raggedright}"
            elif alignment == "right":
                vval = ">{\\raggedleft}"
            vval += "V{\\linewidth}"
            document.body[col_line] = (
                document.body[col_line][:-1] + ' special="' + vval + '">'
            # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
            # with newlines, and we do not want that)
            endcell = find_token(document.body, "</cell>", begcell)
            "\\begin_inset Newline newline",
            "\\begin_inset Newline linebreak",
            nle = find_end_of_inset(document.body, nl)
            del document.body[nle : nle + 1]
            document.body[nl : nl + 1] = put_cmd_in_ert("\\linebreak{}")
            document.body[nl : nl + 1] = put_cmd_in_ert("\\\\")
    # Load the LaTeX packages the generated code relies on.
    if needarray == True:
        add_to_preamble(document, ["\\usepackage{array}"])
    if needvarwidth == True:
        add_to_preamble(document, ["\\usepackage{varwidth}"])
def revert_bibencoding(document):
    """Revert bibliography encoding

    NOTE(review): the early-return branches, most entries of the
    lyx->latex encodings table, loop scaffolding and several condition
    headers are elided in this chunk; visible code kept as-is.
    """
    i = find_token(document.header, "\\cite_engine", 0)
    document.warning("Malformed document! Missing \\cite_engine")
    engine = get_value(document.header, "\\cite_engine", i)
    if engine in ["biblatex", "biblatex-natbib"]:
    # Map lyx to latex encoding names
    "armscii8": "armscii8",
    "iso8859-1": "latin1",
    "iso8859-2": "latin2",
    "iso8859-3": "latin3",
    "iso8859-4": "latin4",
    "iso8859-5": "iso88595",
    "iso8859-6": "8859-6",
    "iso8859-7": "iso-8859-7",
    "iso8859-8": "8859-8",
    "iso8859-9": "latin5",
    "iso8859-13": "latin7",
    "iso8859-15": "latin9",
    "iso8859-16": "latin10",
    "applemac": "applemac",
    "cp437de": "cp437de",
    "utf8-platex": "utf8",
    i = find_token(document.body, "\\begin_inset CommandInset bibtex", i + 1)
    j = find_end_of_inset(document.body, i)
    document.warning("Can't find end of bibtex inset at line %d!!" % (i))
    encoding = get_quoted_value(document.body, "encoding", i, j)
    # remove encoding line
    k = find_token(document.body, "encoding", i, j)
    del document.body[k]
    if encoding == "default":
    # Re-find inset end line
    j = find_end_of_inset(document.body, i)
    # biblatex: encoding goes into \biblio_options in the header.
    h = find_token(document.header, "\\biblio_options", 0)
    biblio_options = get_value(document.header, "\\biblio_options", h)
    if not "bibencoding" in biblio_options:
        document.header[h] += ",bibencoding=%s" % encodings[encoding]
    bs = find_token(document.header, "\\biblatex_bibstyle", 0)
    # this should not happen
    "Malformed LyX document! No \\biblatex_bibstyle header found!"
    document.header[bs - 1 : bs - 1] = [
        "\\biblio_options bibencoding=" + encodings[encoding]
    # bibtex: wrap the inset in \bgroup\inputencoding{...} ... \egroup ERT.
    document.body[j + 1 : j + 1] = put_cmd_in_ert("\\egroup")
    document.body[i:i] = put_cmd_in_ert(
        "\\bgroup\\inputencoding{" + encodings[encoding] + "}"
def convert_vcsinfo(document):
    """Separate vcs Info inset from buffer Info inset.

    NOTE(review): the `types = {` dict opener, additional entries, loop
    scaffolding and `continue` lines are elided in this chunk; visible
    code kept as-is.
    """

    "vcs-revision": "revision",
    "vcs-tree-revision": "tree-revision",
    "vcs-author": "author",
    i = find_token(document.body, "\\begin_inset Info", i + 1)
    j = find_end_of_inset(document.body, i + 1)
    document.warning("Malformed LyX document: Could not find end of Info inset.")
    tp = find_token(document.body, "type", i, j)
    tpv = get_quoted_value(document.body, "type", tp)
    arg = find_token(document.body, "arg", i, j)
    argv = get_quoted_value(document.body, "arg", arg)
    if argv not in list(types.keys()):
    # Rewrite buffer-type vcs-* args as the dedicated "vcs" inset type.
    document.body[tp] = 'type "vcs"'
    document.body[arg] = 'arg "' + types[argv] + '"'
def revert_vcsinfo(document):
    """Merge vcs Info inset to buffer Info inset.

    NOTE(review): loop scaffolding and `continue` lines (including the
    type "vcs" filter) are elided in this chunk; visible code kept as-is.
    """

    args = ["revision", "tree-revision", "author", "time", "date"]
    i = find_token(document.body, "\\begin_inset Info", i + 1)
    j = find_end_of_inset(document.body, i + 1)
    document.warning("Malformed LyX document: Could not find end of Info inset.")
    tp = find_token(document.body, "type", i, j)
    tpv = get_quoted_value(document.body, "type", tp)
    arg = find_token(document.body, "arg", i, j)
    argv = get_quoted_value(document.body, "arg", arg)
    if argv not in args:
        document.warning("Malformed Info inset. Invalid vcs arg.")
    # Fold the vcs inset back into the generic buffer inset (vcs- prefix).
    document.body[tp] = 'type "buffer"'
    document.body[arg] = 'arg "vcs-' + argv + '"'
def revert_vcsinfo_rev_abbrev(document):
    "Convert abbreviated revisions to regular revisions."

    # NOTE(review): loop scaffolding and `continue` lines (including the
    # type filter) are elided in this chunk; visible code kept as-is.
    i = find_token(document.body, "\\begin_inset Info", i + 1)
    j = find_end_of_inset(document.body, i + 1)
    document.warning("Malformed LyX document: Could not find end of Info inset.")
    tp = find_token(document.body, "type", i, j)
    tpv = get_quoted_value(document.body, "type", tp)
    arg = find_token(document.body, "arg", i, j)
    argv = get_quoted_value(document.body, "arg", arg)
    # Older format had no abbreviated revision; downgrade to plain revision.
    if argv == "revision-abbrev":
        document.body[arg] = 'arg "revision"'
1275 def revert_dateinfo(document):
1276 """Revert date info insets to static text."""
1278 # FIXME This currently only considers the main language and uses the system locale
1279 # Ideally, it should honor context languages and switch the locale accordingly.
1281 # The date formats for each language using strftime syntax:
1282 # long, short, loclong, locmedium, locshort
1284 "afrikaans": ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1285 "albanian": ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1286 "american": ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1287 "amharic": ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1323 "australian": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1324 "austrian": ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1325 "bahasa": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1326 "bahasam": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1327 "basque": ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
1343 "%A, %d de %B de %Y",
1349 "breton": ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1350 "british": ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1358 "canadian": ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
1359 "canadien": ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1367 "chinese-simplified": [
1374 "chinese-traditional": [
1381 "coptic": ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1389 "czech": ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
1397 "divehi": ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
1398 "dutch": ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1399 "english": ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1407 "estonian": ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1408 "farsi": ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1409 "finnish": ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1410 "french": ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1412 "%A %d di %B dal %Y",
1419 "%A, %d de %B de %Y",
1425 "georgian": ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1426 "german": ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1441 "greek": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1442 "hebrew": ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1443 "hindi": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1458 "irish": ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1459 "italian": ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
1474 "kannada": ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
1475 "kazakh": ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
1476 "khmer": ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1484 "kurmanji": ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
1485 "lao": ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1486 "latin": ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1488 "%A, %Y. gada %d. %B",
1495 "%Y m. %B %d d., %A",
1508 "macedonian": ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1516 "malayalam": ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1517 "marathi": ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1519 "%A, %Y оны %m сарын %d",
1521 "%Y оны %m сарын %d",
1532 "newzealand": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1533 "ngerman": ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1534 "norsk": ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1535 "nynorsk": ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1536 "occitan": ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1544 "polish": ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1545 "polutonikogreek": [
1553 "%A, %d de %B de %Y",
1559 "romanian": ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1561 "%A, ils %d da %B %Y",
1581 "sanskrit": ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1582 "scottish": ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1597 "slovak": ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1606 "%A, %d de %B de %Y",
1613 "%A, %d de %B %de %Y",
1619 "swedish": ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1620 "syriac": ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1621 "tamil": ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1622 "telugu": ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1623 "thai": ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1625 "%Y %Bའི་ཚེས་%d, %A",
1631 "turkish": ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
1653 "urdu": ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
1661 "welsh": ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1664 types = ["date", "fixdate", "moddate"]
1665 lang = get_value(document.header, "\\language")
1667 document.warning("Malformed LyX document! No \\language header found!")
1672 i = find_token(document.body, "\\begin_inset Info", i + 1)
1675 j = find_end_of_inset(document.body, i + 1)
1677 document.warning("Malformed LyX document: Could not find end of Info inset.")
1679 tp = find_token(document.body, "type", i, j)
1680 tpv = get_quoted_value(document.body, "type", tp)
1681 if tpv not in types:
1683 arg = find_token(document.body, "arg", i, j)
1684 argv = get_quoted_value(document.body, "arg", arg)
1687 if tpv == "fixdate":
1688 datecomps = argv.split("@")
1689 if len(datecomps) > 1:
1691 isodate = datecomps[1]
1692 m = re.search(r"(\d\d\d\d)-(\d\d)-(\d\d)", isodate)
1694 dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
1695 # FIXME if we had the path to the original document (not the one in the tmp dir),
1696 # we could use the mtime.
1697 # elif tpv == "moddate":
1698 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
1701 result = dte.isodate()
1702 elif argv == "long":
1703 result = dte.strftime(dateformats[lang][0])
1704 elif argv == "short":
1705 result = dte.strftime(dateformats[lang][1])
1706 elif argv == "loclong":
1707 result = dte.strftime(dateformats[lang][2])
1708 elif argv == "locmedium":
1709 result = dte.strftime(dateformats[lang][3])
1710 elif argv == "locshort":
1711 result = dte.strftime(dateformats[lang][4])
1714 argv.replace("MMMM", "%b")
1715 .replace("MMM", "%b")
1716 .replace("MM", "%m")
1719 fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
1720 fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
1721 fmt = re.sub("[^'%]d", "%d", fmt)
1722 fmt = fmt.replace("'", "")
1723 result = dte.strftime(fmt)
1724 document.body[i : j + 1] = [result]
1727 def revert_timeinfo(document):
1728 """Revert time info insets to static text."""
# NOTE(review): this excerpt elides several original source lines (visible as
# gaps in the embedded line numbers); comments below describe only visible code.
# Purpose: replace Info insets of type time/fixtime/modtime in document.body
# with static text formatted according to the document's main language.
1730 # FIXME This currently only considers the main language and uses the system locale
1731 # Ideally, it should honor context languages and switch the locale accordingly.
1732 # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
1735 # The time formats for each language using strftime syntax:
# Maps LyX language name -> [long format, short format] (strftime patterns).
1738 "afrikaans": ["%H:%M:%S %Z", "%H:%M"],
1739 "albanian": ["%I:%M:%S %p, %Z", "%I:%M %p"],
1740 "american": ["%I:%M:%S %p %Z", "%I:%M %p"],
1741 "amharic": ["%I:%M:%S %p %Z", "%I:%M %p"],
1742 "ancientgreek": ["%H:%M:%S %Z", "%H:%M:%S"],
1743 "arabic_arabi": ["%I:%M:%S %p %Z", "%I:%M %p"],
1744 "arabic_arabtex": ["%I:%M:%S %p %Z", "%I:%M %p"],
1745 "armenian": ["%H:%M:%S %Z", "%H:%M"],
1746 "asturian": ["%H:%M:%S %Z", "%H:%M"],
1747 "australian": ["%I:%M:%S %p %Z", "%I:%M %p"],
1748 "austrian": ["%H:%M:%S %Z", "%H:%M"],
1749 "bahasa": ["%H.%M.%S %Z", "%H.%M"],
1750 "bahasam": ["%I:%M:%S %p %Z", "%I:%M %p"],
1751 "basque": ["%H:%M:%S (%Z)", "%H:%M"],
1752 "belarusian": ["%H:%M:%S, %Z", "%H:%M"],
1753 "bosnian": ["%H:%M:%S %Z", "%H:%M"],
1754 "brazilian": ["%H:%M:%S %Z", "%H:%M"],
1755 "breton": ["%H:%M:%S %Z", "%H:%M"],
1756 "british": ["%H:%M:%S %Z", "%H:%M"],
1757 "bulgarian": ["%H:%M:%S %Z", "%H:%M"],
1758 "canadian": ["%I:%M:%S %p %Z", "%I:%M %p"],
1759 "canadien": ["%H:%M:%S %Z", "%H h %M"],
1760 "catalan": ["%H:%M:%S %Z", "%H:%M"],
1761 "chinese-simplified": ["%Z %p%I:%M:%S", "%p%I:%M"],
1762 "chinese-traditional": ["%p%I:%M:%S [%Z]", "%p%I:%M"],
1763 "coptic": ["%H:%M:%S %Z", "%H:%M:%S"],
1764 "croatian": ["%H:%M:%S (%Z)", "%H:%M"],
1765 "czech": ["%H:%M:%S %Z", "%H:%M"],
1766 "danish": ["%H.%M.%S %Z", "%H.%M"],
1767 "divehi": ["%H:%M:%S %Z", "%H:%M"],
1768 "dutch": ["%H:%M:%S %Z", "%H:%M"],
1769 "english": ["%I:%M:%S %p %Z", "%I:%M %p"],
1770 "esperanto": ["%H:%M:%S %Z", "%H:%M:%S"],
1771 "estonian": ["%H:%M:%S %Z", "%H:%M"],
1772 "farsi": ["%H:%M:%S (%Z)", "%H:%M"],
1773 "finnish": ["%H.%M.%S %Z", "%H.%M"],
1774 "french": ["%H:%M:%S %Z", "%H:%M"],
1775 "friulan": ["%H:%M:%S %Z", "%H:%M"],
1776 "galician": ["%H:%M:%S %Z", "%H:%M"],
1777 "georgian": ["%H:%M:%S %Z", "%H:%M"],
1778 "german": ["%H:%M:%S %Z", "%H:%M"],
1779 "german-ch": ["%H:%M:%S %Z", "%H:%M"],
1780 "german-ch-old": ["%H:%M:%S %Z", "%H:%M"],
1781 "greek": ["%I:%M:%S %p %Z", "%I:%M %p"],
1782 "hebrew": ["%H:%M:%S %Z", "%H:%M"],
1783 "hindi": ["%I:%M:%S %p %Z", "%I:%M %p"],
1784 "icelandic": ["%H:%M:%S %Z", "%H:%M"],
1785 "interlingua": ["%H:%M:%S %Z", "%H:%M"],
1786 "irish": ["%H:%M:%S %Z", "%H:%M"],
1787 "italian": ["%H:%M:%S %Z", "%H:%M"],
1788 "japanese": ["%H時%M分%S秒 %Z", "%H:%M"],
1789 "japanese-cjk": ["%H時%M分%S秒 %Z", "%H:%M"],
1790 "kannada": ["%I:%M:%S %p %Z", "%I:%M %p"],
1791 "kazakh": ["%H:%M:%S %Z", "%H:%M"],
1792 "khmer": ["%I:%M:%S %p %Z", "%I:%M %p"],
1793 "korean": ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
1794 "kurmanji": ["%H:%M:%S %Z", "%H:%M:%S"],
1795 "lao": ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
1796 "latin": ["%H:%M:%S %Z", "%H:%M:%S"],
1797 "latvian": ["%H:%M:%S %Z", "%H:%M"],
1798 "lithuanian": ["%H:%M:%S %Z", "%H:%M"],
1799 "lowersorbian": ["%H:%M:%S %Z", "%H:%M"],
1800 "macedonian": ["%H:%M:%S %Z", "%H:%M"],
1801 "magyar": ["%H:%M:%S %Z", "%H:%M"],
1802 "malayalam": ["%p %I:%M:%S %Z", "%p %I:%M"],
1803 "marathi": ["%I:%M:%S %p %Z", "%I:%M %p"],
1804 "mongolian": ["%H:%M:%S %Z", "%H:%M"],
1805 "naustrian": ["%H:%M:%S %Z", "%H:%M"],
1806 "newzealand": ["%I:%M:%S %p %Z", "%I:%M %p"],
1807 "ngerman": ["%H:%M:%S %Z", "%H:%M"],
1808 "norsk": ["%H:%M:%S %Z", "%H:%M"],
1809 "nynorsk": ["kl. %H:%M:%S %Z", "%H:%M"],
1810 "occitan": ["%H:%M:%S %Z", "%H:%M"],
1811 "piedmontese": ["%H:%M:%S %Z", "%H:%M:%S"],
1812 "polish": ["%H:%M:%S %Z", "%H:%M"],
1813 "polutonikogreek": ["%I:%M:%S %p %Z", "%I:%M %p"],
1814 "portuguese": ["%H:%M:%S %Z", "%H:%M"],
1815 "romanian": ["%H:%M:%S %Z", "%H:%M"],
1816 "romansh": ["%H:%M:%S %Z", "%H:%M"],
1817 "russian": ["%H:%M:%S %Z", "%H:%M"],
1818 "samin": ["%H:%M:%S %Z", "%H:%M"],
1819 "sanskrit": ["%H:%M:%S %Z", "%H:%M"],
1820 "scottish": ["%H:%M:%S %Z", "%H:%M"],
1821 "serbian": ["%H:%M:%S %Z", "%H:%M"],
1822 "serbian-latin": ["%H:%M:%S %Z", "%H:%M"],
1823 "slovak": ["%H:%M:%S %Z", "%H:%M"],
1824 "slovene": ["%H:%M:%S %Z", "%H:%M"],
1825 "spanish": ["%H:%M:%S (%Z)", "%H:%M"],
1826 "spanish-mexico": ["%H:%M:%S %Z", "%H:%M"],
1827 "swedish": ["kl. %H:%M:%S %Z", "%H:%M"],
1828 "syriac": ["%H:%M:%S %Z", "%H:%M"],
1829 "tamil": ["%p %I:%M:%S %Z", "%p %I:%M"],
1830 "telugu": ["%I:%M:%S %p %Z", "%I:%M %p"],
1831 "thai": ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
1832 "tibetan": ["%I:%M:%S %p %Z", "%I:%M %p"],
1833 "turkish": ["%H:%M:%S %Z", "%H:%M"],
1834 "turkmen": ["%H:%M:%S %Z", "%H:%M"],
1835 "ukrainian": ["%H:%M:%S %Z", "%H:%M"],
1836 "uppersorbian": ["%H:%M:%S %Z", "%H:%M hodź."],
1837 "urdu": ["%I:%M:%S %p %Z", "%I:%M %p"],
1838 "vietnamese": ["%H:%M:%S %Z", "%H:%M"],
1839 "welsh": ["%H:%M:%S %Z", "%H:%M"],
# Only these Info inset types are converted; all others are left untouched.
1842 types = ["time", "fixtime", "modtime"]
1843 i = find_token(document.header, "\\language", 0)
1845 # this should not happen
1846 document.warning("Malformed LyX document! No \\language header found!")
1848 lang = get_value(document.header, "\\language", i)
# Walk the body, replacing each matching Info inset in place.
1852 i = find_token(document.body, "\\begin_inset Info", i + 1)
1855 j = find_end_of_inset(document.body, i + 1)
1857 document.warning("Malformed LyX document: Could not find end of Info inset.")
1859 tp = find_token(document.body, "type", i, j)
1860 tpv = get_quoted_value(document.body, "type", tp)
1861 if tpv not in types:
1863 arg = find_token(document.body, "arg", i, j)
1864 argv = get_quoted_value(document.body, "arg", arg)
# Default to the current system time; "fixtime" insets instead carry an
# explicit ISO time after an '@' separator in their argument.
1866 dtme = datetime.now()
1868 if tpv == "fixtime":
1869 timecomps = argv.split("@")
1870 if len(timecomps) > 1:
1872 isotime = timecomps[1]
1873 m = re.search(r"(\d\d):(\d\d):(\d\d)", isotime)
1875 tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
# Fall back to HH:MM when no seconds were given.
1877 m = re.search(r"(\d\d):(\d\d)", isotime)
1879 tme = time(int(m.group(1)), int(m.group(2)))
1880 # FIXME if we had the path to the original document (not the one in the tmp dir),
1881 # we could use the mtime.
1882 # elif tpv == "moddate":
1883 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
1886 result = tme.isoformat()
1887 elif argv == "long":
1888 result = tme.strftime(timeformats[lang][0])
1889 elif argv == "short":
1890 result = tme.strftime(timeformats[lang][1])
# Otherwise argv is a custom Qt-style format string; translate the Qt format
# letters (HH/hh, mm, ss, zzz/z, t, AP/ap) into strftime directives.
1893 argv.replace("HH", "%H")
1895 .replace("hh", "%I")
1899 fmt.replace("mm", "%M")
1901 .replace("ss", "%S")
1904 fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
1906 fmt.replace("AP", "%p")
1907 .replace("ap", "%p")
1911 fmt = fmt.replace("'", "")
# NOTE(review): `dte` is not the variable holding the time value in this
# function (`tme` / `dtme` above); this looks like it should be
# `tme.strftime(fmt)` -- confirm against upstream.
1912 result = dte.strftime(fmt)
# NOTE(review): assigning a bare string to a list slice splices it into
# document.body character by character; the date variant wraps the result in a
# list (`[result]`) -- this likely should do the same. Confirm.
1913 document.body[i : j + 1] = result
1916 def revert_namenoextinfo(document):
1917 """Merge buffer Info inset type name-noext to name."""
# NOTE(review): some original lines are elided in this excerpt (gaps in the
# embedded numbering), e.g. the loop header and guard/continue lines.
# Visible logic: for each Info inset, read its "type" and "arg" values; when
# the argument is "name-noext", rewrite it to the plain "name" argument.
1921 i = find_token(document.body, "\\begin_inset Info", i + 1)
1924 j = find_end_of_inset(document.body, i + 1)
1926 document.warning("Malformed LyX document: Could not find end of Info inset.")
1928 tp = find_token(document.body, "type", i, j)
1929 tpv = get_quoted_value(document.body, "type", tp)
1932 arg = find_token(document.body, "arg", i, j)
1933 argv = get_quoted_value(document.body, "arg", arg)
1934 if argv != "name-noext":
1936 document.body[arg] = 'arg "name"'
1939 def revert_l7ninfo(document):
1940 """Revert l7n Info inset to text."""
# Replaces each l7n (localization) Info inset with its argument as plain text,
# after stripping GUI decorations from the string. (Loop header and some of
# the string-cleanup chain are elided in this excerpt.)
1944 i = find_token(document.body, "\\begin_inset Info", i + 1)
1947 j = find_end_of_inset(document.body, i + 1)
1949 document.warning("Malformed LyX document: Could not find end of Info inset.")
1951 tp = find_token(document.body, "type", i, j)
1952 tpv = get_quoted_value(document.body, "type", tp)
1955 arg = find_token(document.body, "arg", i, j)
1956 argv = get_quoted_value(document.body, "arg", arg)
1957 # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
# The " & " <-> "</amp;>" round-trip protects a literal spaced ampersand while
# lone "&" accelerator markers are removed (removal step elided here).
1961 .replace(" & ", "</amp;>")
1963 .replace("</amp;>", " & ")
# NOTE(review): `argv` is a string; assigning it to a list slice splices it
# character by character into document.body -- confirm whether `[argv]` was
# intended, as in other revert routines.
1965 document.body[i : j + 1] = argv
1968 def revert_listpargs(document):
1969 """Reverts listpreamble arguments to TeX-code"""
# Moves the contents of each "Argument listpreamble:*" inset out of the inset
# and re-inserts it at the start of the containing paragraph wrapped in an ERT
# inset. (Loop header and parts of the ERT literal are elided in this excerpt.)
1972 i = find_token(document.body, "\\begin_inset Argument listpreamble:", i + 1)
1975 j = find_end_of_inset(document.body, i)
1976 # Find containing paragraph layout
1977 parent = get_containing_layout(document.body, i)
1979 document.warning("Malformed LyX document: Can't find parent paragraph layout")
# Extract the argument's Plain Layout content, then delete the whole inset.
1982 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
1983 endPlain = find_end_of_layout(document.body, beginPlain)
1984 content = document.body[beginPlain + 1 : endPlain]
1985 del document.body[i : j + 1]
1988 "\\begin_inset ERT",
1991 "\\begin_layout Plain Layout",
1995 + ["}", "\\end_layout", "", "\\end_inset", ""]
# Re-insert the ERT substitution at the paragraph's beginning.
1997 document.body[parbeg:parbeg] = subst
2000 def revert_lformatinfo(document):
2001 """Revert layout format Info inset to text."""
# Replaces each Info inset of type "lyxinfo" with argument "layoutformat" by
# the literal layout format number. (Loop header and continue lines elided.)
2005 i = find_token(document.body, "\\begin_inset Info", i + 1)
2008 j = find_end_of_inset(document.body, i + 1)
2010 document.warning("Malformed LyX document: Could not find end of Info inset.")
2012 tp = find_token(document.body, "type", i, j)
2013 tpv = get_quoted_value(document.body, "type", tp)
2014 if tpv != "lyxinfo":
2016 arg = find_token(document.body, "arg", i, j)
2017 argv = get_quoted_value(document.body, "arg", arg)
2018 if argv != "layoutformat":
# NOTE(review): assigning the bare string "69" to a list slice inserts the two
# characters "6" and "9" as separate body entries; other reverts wrap the
# replacement in a list -- confirm whether `["69"]` was intended.
2021 document.body[i : j + 1] = "69"
2024 def convert_hebrew_parentheses(document):
2025 """Swap opening/closing parentheses in Hebrew text.
2027 Up to LyX 2.4, "(" was used as closing parenthesis and
2028 ")" as opening parenthesis for Hebrew in the LyX source.
# NOTE(review): this excerpt elides a number of original lines (gaps in the
# embedded numbering), including the docstring close and the initialization of
# `i` and `current_insets` that are used further below.
# Strategy: walk the body tracking current language plus layout/inset nesting,
# and swap parentheses only in Hebrew text lines that are not inside pass-thru
# (verbatim-LaTeX) layouts, insets, or inset arguments.
2030 current_languages = [document.language]
2031 current_layouts = []
2033 # pass thru argument insets
2034 skip_layouts_arguments = {}
2035 skip_insets_arguments = {}
2037 skip_insets = ["Formula", "ERT", "listings", "Flex URL"]
2038 # pass thru insets per document class
# Beamer-family classes: many layout/inset arguments hold verbatim LaTeX.
2039 if document.textclass in [
2041 "scrarticle-beamer",
2045 skip_layouts_arguments.update(
2047 "Itemize": ["1", "item:2"],
2048 "Enumerate": ["1", "item:2"],
2049 "Description": ["1", "item:1"],
2053 "Subsection": ["1"],
2054 "Subsection*": ["1"],
2055 "Subsubsection": ["1"],
2056 "Subsubsection*": ["1"],
2057 "Frame": ["1", "2"],
2058 "AgainFrame": ["1", "2"],
2059 "PlainFrame": ["1", "2"],
2060 "FragileFrame": ["1", "2"],
2061 "FrameTitle": ["1"],
2062 "FrameSubtitle": ["1"],
2063 "Overprint": ["item:1"],
2067 "ExampleBlock": ["1"],
2068 "AlertBlock": ["1"],
2074 "Definition": ["1"],
2075 "Definitions": ["1"],
2085 skip_insets_arguments.update(
2088 "Flex Emphasize": ["1"],
2089 "Flex Alert": ["1"],
2090 "Flex Structure": ["1"],
2092 "Flex Uncover": ["1"],
2093 "Flex Visible": ["1"],
2094 "Flex Invisible": ["1"],
2095 "Flex Alternative": ["1"],
2096 "Flex Beamer Note": ["1"],
2099 elif document.textclass == "europecv":
2100 skip_layouts_arguments.update({"Picture": ["1"], "Item": ["1"], "MotherTongue": ["1"]})
2101 elif document.textclass in ["acmsiggraph", "acmsiggraph-0-92"]:
2102 skip_insets_arguments.update({"Flex CRcat": ["1", "2", "3"]})
2103 elif document.textclass in ["aastex", "aastex6", "aastex62"]:
2104 skip_layouts_arguments.update(
2106 "Altaffilation": ["1"],
2109 elif document.textclass == "jss":
2110 skip_insets.append("Flex Code Chunk")
2111 elif document.textclass == "moderncv":
2112 skip_layouts_arguments.update(
2114 "Photo": ["1", "2"],
2117 skip_insets_arguments.update({"Flex Column": ["1"]})
2118 elif document.textclass == "agutex":
2119 skip_layouts_arguments.update({"Author affiliation": ["1"]})
2120 elif document.textclass in ["ijmpd", "ijmpc"]:
2121 skip_layouts_arguments.update({"RomanList": ["1"]})
2122 elif document.textclass in ["jlreq-book", "jlreq-report", "jlreq-article"]:
2123 skip_insets.append("Flex Warichu*")
2124 # pathru insets per module
2125 if "hpstatement" in document.get_module_list():
2126 skip_insets.append("Flex H-P number")
2127 if "tcolorbox" in document.get_module_list():
2128 skip_layouts_arguments.update({"New Color Box Type": ["3"]})
2129 if "sweave" in document.get_module_list():
2132 "Flex Sweave Options",
2133 "Flex S/R expression",
2134 "Flex Sweave Input File",
2138 if "knitr" in document.get_module_list():
2139 skip_insets.extend(["Flex Sweave Options", "Flex S/R expression", "Flex Chunk"])
2140 if "linguistics" in document.get_module_list():
2141 skip_layouts_arguments.update(
2143 "Numbered Example (multiline)": ["1"],
2144 "Numbered Examples (consecutive)": ["1"],
2145 "Subexample": ["1"],
2148 if "chessboard" in document.get_module_list():
2149 skip_insets.append("Flex Mainline")
2150 skip_layouts_arguments.update({"NewChessGame": ["1"]})
2151 skip_insets_arguments.update({"Flex ChessBoard": ["1"]})
2152 if "lilypond" in document.get_module_list():
2153 skip_insets.append("Flex LilyPond")
2154 if "noweb" in document.get_module_list():
2155 skip_insets.append("Flex Chunk")
2156 if "multicol" in document.get_module_list():
2157 skip_insets_arguments.update({"Flex Multiple Columns": ["1"]})
# Main scan loop: track \lang switches and layout/inset nesting as stacks.
2159 inset_is_arg = False
2160 while i < len(document.body):
2161 line = document.body[i]
2162 if line.startswith("\\lang "):
2163 tokenend = len("\\lang ")
2164 lang = line[tokenend:].strip()
2165 current_languages[-1] = lang
2166 elif line.startswith("\\begin_layout "):
2167 current_languages.append(current_languages[-1])
2168 tokenend = len("\\begin_layout ")
2169 layout = line[tokenend:].strip()
2170 current_layouts.append(layout)
2171 elif line.startswith("\\end_layout"):
2172 current_languages.pop()
2173 current_layouts.pop()
2174 elif line.startswith("\\begin_inset Argument "):
2175 tokenend = len("\\begin_inset Argument ")
2176 Argument = line[tokenend:].strip()
2177 # all listpreamble:1 arguments are pass thru
2178 listpreamble = Argument == "listpreamble:1"
2179 layout_arg = current_layouts and Argument in skip_layouts_arguments.get(
2180 current_layouts[-1], []
2182 inset_arg = current_insets and Argument in skip_insets_arguments.get(
2183 current_insets[-1], []
2185 if layout_arg or inset_arg or listpreamble:
2186 # In these arguments, parentheses must not be changed
2187 i = find_end_of_inset(document.body, i) + 1
2191 elif line.startswith("\\begin_inset "):
2192 tokenend = len("\\begin_inset ")
2193 inset = line[tokenend:].strip()
2194 current_insets.append(inset)
2195 if inset in skip_insets:
2196 # In these insets, parentheses must not be changed
2197 i = find_end_of_inset(document.body, i)
2199 elif line.startswith("\\end_inset"):
2201 inset_is_arg = is_in_inset(document.body, i, "\\begin_inset Argument")[0] != -1
2203 current_insets.pop()
2204 elif current_languages[-1] == "hebrew" and not line.startswith("\\"):
# Swap "(" and ")" via a NUL placeholder so the two replacements don't collide.
2205 document.body[i] = line.replace("(", "\x00").replace(")", "(").replace("\x00", ")")
def revert_hebrew_parentheses(document):
    """Swap Hebrew parentheses back to the pre-2.4 storage convention.

    The swap is an involution, so reverting is just applying the same
    conversion again; this wrapper exists only to satisfy the
    convert/revert naming convention of lyx2lyx.
    """
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    """Set the document language to English while emulating Malayalam output."""
    revert_language(document, "malayalam", "", "malayalam")
2221 def revert_soul(document):
2222 """Revert soul module flex insets to ERT"""
# If any of the module's flex insets occur in the body, load the `soul`
# package (plus `color` for Highlight), then rewrite each flex inset to the
# corresponding soul command. (Loop/guard lines are elided in this excerpt.)
2224 flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]
2227 i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
2229 add_to_preamble(document, ["\\usepackage{soul}"])
2231 i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
2233 add_to_preamble(document, ["\\usepackage{color}"])
# Flex inset name -> soul command mapping applied unconditionally.
2235 revert_flex_inset(document.body, "Spaceletters", "\\so")
2236 revert_flex_inset(document.body, "Strikethrough", "\\st")
2237 revert_flex_inset(document.body, "Underline", "\\ul")
2238 revert_flex_inset(document.body, "Highlight", "\\hl")
2239 revert_flex_inset(document.body, "Capitalize", "\\caps")
2242 def revert_tablestyle(document):
2243 """Remove tablestyle params"""
# Deletes the \tablestyle line from the document header.
2245 i = find_token(document.header, "\\tablestyle")
# NOTE(review): a guard line between these two statements (presumably
# `if i != -1:`) is elided in this excerpt; as shown, a missing token would
# delete header[-1]. Confirm against upstream before changing.
2247 del document.header[i]
2250 def revert_bibfileencodings(document):
2251 """Revert individual Biblatex bibliography encodings"""
# Only biblatex engines support per-file encodings. For each bibtex inset
# carrying a "file_encodings" parameter: drop that parameter, emit
# \addbibresource preamble lines (with bibencoding options translated from
# LyX to LaTeX encoding names), insert an ERT \printbibliography, and wrap
# the original inset in a Note. (Several lines are elided in this excerpt.)
2255 i = find_token(document.header, "\\cite_engine", 0)
2257 document.warning("Malformed document! Missing \\cite_engine")
2259 engine = get_value(document.header, "\\cite_engine", i)
2263 if engine in ["biblatex", "biblatex-natbib"]:
2266 # Map lyx to latex encoding names
2270 "armscii8": "armscii8",
2271 "iso8859-1": "latin1",
2272 "iso8859-2": "latin2",
2273 "iso8859-3": "latin3",
2274 "iso8859-4": "latin4",
2275 "iso8859-5": "iso88595",
2276 "iso8859-6": "8859-6",
2277 "iso8859-7": "iso-8859-7",
2278 "iso8859-8": "8859-8",
2279 "iso8859-9": "latin5",
2280 "iso8859-13": "latin7",
2281 "iso8859-15": "latin9",
2282 "iso8859-16": "latin10",
2283 "applemac": "applemac",
2285 "cp437de": "cp437de",
2302 "utf8-platex": "utf8",
2308 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i + 1)
2311 j = find_end_of_inset(document.body, i)
2313 document.warning("Can't find end of bibtex inset at line %d!!" % (i))
2315 encodings = get_quoted_value(document.body, "file_encodings", i, j)
2319 bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
2320 opts = get_quoted_value(document.body, "biblatexopts", i, j)
2321 if len(bibfiles) == 0:
2322 document.warning("Bibtex inset at line %d does not have a bibfile!" % (i))
2323 # remove encoding line
2324 k = find_token(document.body, "file_encodings", i, j)
2326 del document.body[k]
2327 # Re-find inset end line
2328 j = find_end_of_inset(document.body, i)
# "file_encodings" is a tab-separated list of "<bibfile> <encoding>" pairs.
2330 enclist = encodings.split("\t")
2333 ppp = pp.split(" ", 1)
2334 encmap[ppp[0]] = ppp[1]
2335 for bib in bibfiles:
2336 pr = "\\addbibresource"
2337 if bib in encmap.keys():
2338 pr += "[bibencoding=" + encmap[bib] + "]"
2339 pr += "{" + bib + "}"
2340 add_to_preamble(document, [pr])
2341 # Insert ERT \\printbibliography and wrap bibtex inset to a Note
2342 pcmd = "printbibliography"
2344 pcmd += "[" + opts + "]"
2346 "\\begin_inset ERT",
2349 "\\begin_layout Plain Layout",
2361 "\\begin_layout Standard",
2362 "\\begin_inset Note Note",
2365 "\\begin_layout Plain Layout",
# Keep the original inset inside the note, then splice the replacement in.
2367 repl += document.body[i : j + 1]
2368 repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
2369 document.body[i : j + 1] = repl
2375 def revert_cmidruletrimming(document):
2376 """Remove \\cmidrule trimming"""
# Strips (bottom|top)line[lr]trim="true" attributes from table <cell ...>
# lines, silently dropping the trimming feature. (Loop/guard lines elided.)
2378 # FIXME: Revert to TeX code?
2381 # first, let's find out if we need to do anything
2382 i = find_token(document.body, "<cell ", i + 1)
2385 j = document.body[i].find('trim="')
2388 rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
2389 # remove trim option
2390 document.body[i] = rgx.sub("", document.body[i])
2394 r"### Inserted by lyx2lyx (ruby inset) ###",
2395 r"InsetLayout Flex:Ruby",
2396 r" LyxType charstyle",
2397 r" LatexType command",
2401 r" HTMLInnerTag rb",
2402 r' HTMLInnerAttr ""',
2404 r' LabelString "Ruby"',
2405 r" Decoration Conglomerate",
2407 r" \ifdefined\kanjiskip",
2408 r" \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}",
2409 r" \else \ifdefined\luatexversion",
2410 r" \usepackage{luatexja-ruby}",
2411 r" \else \ifdefined\XeTeXversion",
2412 r" \usepackage{ruby}%",
2414 r" \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}",
2416 r" Argument post:1",
2417 r' LabelString "ruby text"',
2418 r' MenuString "Ruby Text|R"',
2419 r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
2420 r" Decoration Conglomerate",
def convert_ruby_module(document):
    """Use the stock "ruby" module instead of a local layout definition.

    When the document carries the local Ruby inset layout, remove it and
    enable the ruby module in its place; other documents are untouched.
    """
    had_local_definition = document.del_local_layout(ruby_inset_def)
    if had_local_definition:
        document.add_module("ruby")
def revert_ruby_module(document):
    """Replace the stock "ruby" module with a local layout definition.

    When the module is enabled, remove it and append the equivalent local
    Ruby inset layout; other documents are untouched.
    """
    module_was_enabled = document.del_module("ruby")
    if module_was_enabled:
        document.append_local_layout(ruby_inset_def)
2445 def convert_utf8_japanese(document):
2446 """Use generic utf8 with Japanese documents."""
# Normalizes the Japanese-specific utf8 encoding variants (utf8-platex for
# pLaTeX, utf8-cjk for CJK) to plain "utf8" in the \inputencoding header.
# (The early-return line after the language check is elided in this excerpt.)
2447 lang = get_value(document.header, "\\language")
2448 if not lang.startswith("japanese"):
2450 inputenc = get_value(document.header, "\\inputencoding")
2451 if (lang == "japanese" and inputenc == "utf8-platex") or (
2452 lang == "japanese-cjk" and inputenc == "utf8-cjk"
2454 document.set_parameter("inputencoding", "utf8")
2457 def revert_utf8_japanese(document):
2458 """Use Japanese utf8 variants with Japanese documents."""
# Inverse of convert_utf8_japanese: when the header says plain "utf8",
# restore the variant encoding that matches the document language.
2459 inputenc = get_value(document.header, "\\inputencoding")
2460 if inputenc != "utf8":
2462 lang = get_value(document.header, "\\language")
2463 if lang == "japanese":
2464 document.set_parameter("inputencoding", "utf8-platex")
2465 if lang == "japanese-cjk":
2466 document.set_parameter("inputencoding", "utf8-cjk")
2469 def revert_lineno(document):
2470 "Replace lineno setting with user-preamble code."
# Reads and deletes the \lineno_options and \use_lineno headers; when line
# numbering was enabled, emits equivalent \usepackage{lineno} + \linenumbers
# preamble code. (Return and options-guard lines are elided in this excerpt.)
2472 options = get_quoted_value(document.header, "\\lineno_options", delete=True)
2473 if not get_bool_value(document.header, "\\use_lineno", delete=True):
2476 options = "[" + options + "]"
2477 add_to_preamble(document, ["\\usepackage%s{lineno}" % options, "\\linenumbers"])
2480 def convert_lineno(document):
2481 "Replace user-preamble code with native lineno support."
# Detects \usepackage[...]{lineno} followed by \linenumbers in the user
# preamble, removes those lines (plus a possible "% Added by lyx2lyx"
# marker), and writes native \use_lineno / \lineno_options headers, inserted
# before the \index header. (Guard/return lines are elided in this excerpt.)
2484 i = find_token(document.preamble, "\\linenumbers", 1)
2486 usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i - 1])
2489 options = usepkg.group(1).strip("[]")
2490 del document.preamble[i - 1 : i + 1]
2492 del_token(document.preamble, "% Added by lyx2lyx", i - 2, i - 1)
2494 k = find_token(document.header, "\\index ")
2496 document.header[k:k] = ["\\use_lineno %d" % use_lineno]
2498 document.header[k:k] = [
2499 "\\use_lineno %d" % use_lineno,
2500 "\\lineno_options %s" % options,
2504 def convert_aaencoding(document):
2505 "Convert default document option due to encoding change in aa class."
# For the "aa" class only: when the input encoding is auto-legacy or latin9
# and default class options are in use, switch to explicit \options including
# latin9. (Guard/return lines are elided in this excerpt.)
2507 if document.textclass != "aa":
2510 i = find_token(document.header, "\\use_default_options true")
2513 val = get_value(document.header, "\\inputencoding")
2515 document.warning("Malformed LyX Document! Missing '\\inputencoding' header.")
2517 if val == "auto-legacy" or val == "latin9":
2518 document.header[i] = "\\use_default_options false"
2519 k = find_token(document.header, "\\options")
# Insert a new \options header if none exists, otherwise append to it.
2521 document.header.insert(i, "\\options latin9")
2523 document.header[k] += ",latin9"
2526 def revert_aaencoding(document):
2527 "Revert default document option due to encoding change in aa class."
# Inverse of convert_aaencoding for the "aa" class: disable default options
# and force utf8 into \options. (Guard/return lines elided in this excerpt.)
2529 if document.textclass != "aa":
2532 i = find_token(document.header, "\\use_default_options true")
2535 val = get_value(document.header, "\\inputencoding")
2537 document.warning("Malformed LyX Document! Missing \\inputencoding header.")
2540 document.header[i] = "\\use_default_options false"
2541 k = find_token(document.header, "\\options", 0)
# Insert a new \options header if none exists, otherwise append to it.
2543 document.header.insert(i, "\\options utf8")
2545 document.header[k] = document.header[k] + ",utf8"
2548 def revert_new_languages(document):
2549 """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
2550 and Russian (Petrine orthography)."""
2552 # lyxname: (babelname, polyglossianame)
2554 "azerbaijani": ("azerbaijani", ""),
2555 "bengali": ("", "bengali"),
2556 "churchslavonic": ("", "churchslavonic"),
2557 "oldrussian": ("", "russian"),
2558 "korean": ("", "korean"),
# Collect the newly-supported languages actually used: the main document
# language plus any \lang switches in the body.
2560 if document.language in new_languages:
2561 used_languages = {document.language}
2563 used_languages = set()
2566 i = find_token(document.body, "\\lang", i + 1)
2569 val = get_value(document.body, "\\lang", i)
2570 if val in new_languages:
2571 used_languages.add(val)
2573 # Korean is already supported via CJK, so leave as-is for Babel
2574 if "korean" in used_languages and (
2575 not get_bool_value(document.header, "\\use_non_tex_fonts")
2576 or get_value(document.header, "\\language_package") == "babel"
2578 used_languages.discard("korean")
# Emulate each used language via babel/polyglossia names from the table.
2580 for lang in used_languages:
2581 revert_language(document, lang, *new_languages[lang])
2585 r"### Inserted by lyx2lyx (deprecated ling glosses) ###",
2586 r"InsetLayout Flex:Glosse",
2588 r' LabelString "Gloss (old version)"',
2589 r' MenuString "Gloss (old version)"',
2590 r" LatexType environment",
2591 r" LatexName linggloss",
2592 r" Decoration minimalistic",
2597 r" CustomPars false",
2598 r" ForcePlain true",
2599 r" ParbreakIsNewline true",
2600 r" FreeSpacing true",
2601 r" Requires covington",
2604 r" \@ifundefined{linggloss}{%",
2605 r" \newenvironment{linggloss}[2][]{",
2606 r" \def\glosstr{\glt #1}%",
2608 r" {\glosstr\glend}}{}",
2611 r" ResetsFont true",
2613 r" Decoration conglomerate",
2614 r' LabelString "Translation"',
2615 r' MenuString "Glosse Translation|s"',
2616 r' Tooltip "Add a translation for the glosse"',
# Local InsetLayout definition emulating the deprecated "Tri-Glosse"
# linguistics inset; appended to the document's local layout by
# convert_linggloss. (Several literal lines of the definition are elided
# in this excerpt.)
2621 glosss_inset_def = [
2622 r"### Inserted by lyx2lyx (deprecated ling glosses) ###",
2623 r"InsetLayout Flex:Tri-Glosse",
2625 r' LabelString "Tri-Gloss (old version)"',
2626 r' MenuString "Tri-Gloss (old version)"',
2627 r" LatexType environment",
2628 r" LatexName lingglosss",
2629 r" Decoration minimalistic",
2634 r" CustomPars false",
2635 r" ForcePlain true",
2636 r" ParbreakIsNewline true",
2637 r" FreeSpacing true",
2639 r" Requires covington",
2642 r" \@ifundefined{lingglosss}{%",
2643 r" \newenvironment{lingglosss}[2][]{",
2644 r" \def\glosstr{\glt #1}%",
2646 r" {\glosstr\glend}}{}",
2648 r" ResetsFont true",
2650 r" Decoration conglomerate",
2651 r' LabelString "Translation"',
2652 r' MenuString "Glosse Translation|s"',
2653 r' Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    "Move old ling glosses to local layout"
    # Append the matching local layout definition for each deprecated
    # gloss inset that actually occurs in the body.
    for inset_token, layout_def in (
        ("\\begin_inset Flex Glosse", gloss_inset_def),
        ("\\begin_inset Flex Tri-Glosse", glosss_inset_def),
    ):
        if find_token(document.body, inset_token, 0) != -1:
            document.append_local_layout(layout_def)
2667 def revert_linggloss(document):
2668 "Revert to old ling gloss definitions"
2669 if not "linguistics" in document.get_module_list():
2671 document.del_local_layout(gloss_inset_def)
2672 document.del_local_layout(glosss_inset_def)
2676 "\\begin_inset Flex Interlinear Gloss (2 Lines)",
2677 "\\begin_inset Flex Interlinear Gloss (3 Lines)",
2679 for glosse in glosses:
2682 i = find_token(document.body, glosse, i + 1)
2685 j = find_end_of_inset(document.body, i)
2687 document.warning("Malformed LyX document: Can't find end of Gloss inset")
2690 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2691 endarg = find_end_of_inset(document.body, arg)
2694 argbeginPlain = find_token(
2695 document.body, "\\begin_layout Plain Layout", arg, endarg
2697 if argbeginPlain == -1:
2698 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2700 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2701 optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
2703 # remove Arg insets and paragraph, if it only contains this inset
2705 document.body[arg - 1] == "\\begin_layout Plain Layout"
2706 and find_end_of_layout(document.body, arg - 1) == endarg + 3
2708 del document.body[arg - 1 : endarg + 4]
2710 del document.body[arg : endarg + 1]
2712 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2713 endarg = find_end_of_inset(document.body, arg)
2716 argbeginPlain = find_token(
2717 document.body, "\\begin_layout Plain Layout", arg, endarg
2719 if argbeginPlain == -1:
2720 document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
2722 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2723 marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2725 # remove Arg insets and paragraph, if it only contains this inset
2727 document.body[arg - 1] == "\\begin_layout Plain Layout"
2728 and find_end_of_layout(document.body, arg - 1) == endarg + 3
2730 del document.body[arg - 1 : endarg + 4]
2732 del document.body[arg : endarg + 1]
2734 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2735 endarg = find_end_of_inset(document.body, arg)
2738 argbeginPlain = find_token(
2739 document.body, "\\begin_layout Plain Layout", arg, endarg
2741 if argbeginPlain == -1:
2742 document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
2744 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2745 marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2747 # remove Arg insets and paragraph, if it only contains this inset
2749 document.body[arg - 1] == "\\begin_layout Plain Layout"
2750 and find_end_of_layout(document.body, arg - 1) == endarg + 3
2752 del document.body[arg - 1 : endarg + 4]
2754 del document.body[arg : endarg + 1]
2756 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2757 endarg = find_end_of_inset(document.body, arg)
2760 argbeginPlain = find_token(
2761 document.body, "\\begin_layout Plain Layout", arg, endarg
2763 if argbeginPlain == -1:
2764 document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
2766 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2767 marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2769 # remove Arg insets and paragraph, if it only contains this inset
2771 document.body[arg - 1] == "\\begin_layout Plain Layout"
2772 and find_end_of_layout(document.body, arg - 1) == endarg + 3
2774 del document.body[arg - 1 : endarg + 4]
2776 del document.body[arg : endarg + 1]
2779 if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
2782 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2783 endInset = find_end_of_inset(document.body, i)
2784 endPlain = find_end_of_layout(document.body, beginPlain)
2785 precontent = put_cmd_in_ert(cmd)
2786 if len(optargcontent) > 0:
2787 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
2788 precontent += put_cmd_in_ert("{")
2791 put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
2793 if cmd == "\\trigloss":
2794 postcontent += put_cmd_in_ert("}{") + marg3content
2795 postcontent += put_cmd_in_ert("}")
2797 document.body[endPlain : endInset + 1] = postcontent
2798 document.body[beginPlain + 1 : beginPlain] = precontent
2799 del document.body[i : beginPlain + 1]
2801 document.append_local_layout("Requires covington")
2806 def revert_subexarg(document):
2807 "Revert linguistic subexamples with argument to ERT"
# Purpose: rewrite "Subexample" layouts (linguistics module) as raw LaTeX
# \begin{subexamples}[<optarg>] \item ... \end{subexamples} inside ERT.
# NOTE(review): the embedded original line numbers are non-contiguous —
# guard/return lines (e.g. 'return', 'if i == -1: break') appear elided
# from this excerpt; verify against upstream lyx2lyx lyx_2_4.py.
2809 if not "linguistics" in document.get_module_list():
# Locate each Subexample layout in turn (presumably inside a while loop).
2815 i = find_token(document.body, "\\begin_layout Subexample", i + 1)
2818 j = find_end_of_layout(document.body, i)
2820 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2823 # check for consecutive layouts
2824 k = find_token(document.body, "\\begin_layout", j)
2825 if k == -1 or document.body[k] != "\\begin_layout Subexample":
2827 j = find_end_of_layout(document.body, k)
2829 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Extract the Argument 1 inset content: the option string for subexamples.
2832 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2836 endarg = find_end_of_inset(document.body, arg)
2838 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2839 if argbeginPlain == -1:
2840 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2842 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2843 optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
2845 # remove Arg insets and paragraph, if it only contains this inset
2847 document.body[arg - 1] == "\\begin_layout Plain Layout"
2848 and find_end_of_layout(document.body, arg - 1) == endarg + 3
2850 del document.body[arg - 1 : endarg + 4]
2852 del document.body[arg : endarg + 1]
# Opening ERT for the environment, carrying the extracted option string.
2854 cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
2856 # re-find end of layout
2857 j = find_end_of_layout(document.body, i)
2859 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2862 # check for consecutive layouts
2863 k = find_token(document.body, "\\begin_layout", j)
2864 if k == -1 or document.body[k] != "\\begin_layout Subexample":
# Each consecutive Subexample becomes a Standard layout starting with "\item ".
2866 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2867 j = find_end_of_layout(document.body, k)
2869 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Close the environment after the last consecutive Subexample layout ...
2872 endev = put_cmd_in_ert("\\end{subexamples}")
2874 document.body[j:j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
# ... and replace the first one with the opening ERT plus the first \item.
2875 document.body[i : i + 1] = (
2876 ["\\begin_layout Standard"]
2878 + ["\\end_layout", "", "\\begin_layout Standard"]
2879 + put_cmd_in_ert("\\item ")
# The covington package supplies the subexamples environment.
2882 document.append_local_layout("Requires covington")
2886 def revert_drs(document):
2887 "Revert DRS insets (linguistics) to ERT"
# Purpose: replace the various Flex DRS insets (Discourse Representation
# Structures, linguistics module) with equivalent raw-LaTeX ERT calls to
# the drs/covington commands, moving inset Arguments into brace groups.
# NOTE(review): embedded original line numbers skip values — loop headers,
# 'if x != -1:' guards and else-branches appear elided from this excerpt;
# verify against upstream lyx2lyx lyx_2_4.py.
2889 if not "linguistics" in document.get_module_list():
# The DRS inset flavors handled below (presumably collected in a list
# iterated as 'drs'; the list header/loop lines are elided here).
2894 "\\begin_inset Flex DRS",
2895 "\\begin_inset Flex DRS*",
2896 "\\begin_inset Flex IfThen-DRS",
2897 "\\begin_inset Flex Cond-DRS",
2898 "\\begin_inset Flex QDRS",
2899 "\\begin_inset Flex NegDRS",
2900 "\\begin_inset Flex SDRS",
2905 i = find_token(document.body, drs, i + 1)
2908 j = find_end_of_inset(document.body, i)
2910 document.warning("Malformed LyX document: Can't find end of DRS inset")
2913 # Check for arguments
# Argument 1: first pre-spaced brace group (referents/conditions).
2914 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2915 endarg = find_end_of_inset(document.body, arg)
2918 argbeginPlain = find_token(
2919 document.body, "\\begin_layout Plain Layout", arg, endarg
2921 if argbeginPlain == -1:
2923 "Malformed LyX document: Can't find Argument 1 plain Layout"
2926 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2927 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2929 # remove Arg insets and paragraph, if it only contains this inset
2931 document.body[arg - 1] == "\\begin_layout Plain Layout"
2932 and find_end_of_layout(document.body, arg - 1) == endarg + 3
2934 del document.body[arg - 1 : endarg + 4]
2936 del document.body[arg : endarg + 1]
# Inset bounds shift after each deletion, so the end is re-found each time.
2939 j = find_end_of_inset(document.body, i)
2941 document.warning("Malformed LyX document: Can't find end of DRS inset")
# Argument 2: second pre-group (used by SDRS below).
2944 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2945 endarg = find_end_of_inset(document.body, arg)
2948 argbeginPlain = find_token(
2949 document.body, "\\begin_layout Plain Layout", arg, endarg
2951 if argbeginPlain == -1:
2953 "Malformed LyX document: Can't find Argument 2 plain Layout"
2956 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2957 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2959 # remove Arg insets and paragraph, if it only contains this inset
2961 document.body[arg - 1] == "\\begin_layout Plain Layout"
2962 and find_end_of_layout(document.body, arg - 1) == endarg + 3
2964 del document.body[arg - 1 : endarg + 4]
2966 del document.body[arg : endarg + 1]
2969 j = find_end_of_inset(document.body, i)
2971 document.warning("Malformed LyX document: Can't find end of DRS inset")
# Arguments post:1 .. post:4: trailing brace groups; each defaults to []
# so the ERT assembly below works when the argument is absent.
2974 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2975 endarg = find_end_of_inset(document.body, arg)
2976 postarg1content = []
2978 argbeginPlain = find_token(
2979 document.body, "\\begin_layout Plain Layout", arg, endarg
2981 if argbeginPlain == -1:
2983 "Malformed LyX document: Can't find Argument post:1 plain Layout"
2986 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2987 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2989 # remove Arg insets and paragraph, if it only contains this inset
2991 document.body[arg - 1] == "\\begin_layout Plain Layout"
2992 and find_end_of_layout(document.body, arg - 1) == endarg + 3
2994 del document.body[arg - 1 : endarg + 4]
2996 del document.body[arg : endarg + 1]
2999 j = find_end_of_inset(document.body, i)
3001 document.warning("Malformed LyX document: Can't find end of DRS inset")
3004 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
3005 endarg = find_end_of_inset(document.body, arg)
3006 postarg2content = []
3008 argbeginPlain = find_token(
3009 document.body, "\\begin_layout Plain Layout", arg, endarg
3011 if argbeginPlain == -1:
3013 "Malformed LyX document: Can't find Argument post:2 plain Layout"
3016 argendPlain = find_end_of_inset(document.body, argbeginPlain)
3017 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
3019 # remove Arg insets and paragraph, if it only contains this inset
3021 document.body[arg - 1] == "\\begin_layout Plain Layout"
3022 and find_end_of_layout(document.body, arg - 1) == endarg + 3
3024 del document.body[arg - 1 : endarg + 4]
3026 del document.body[arg : endarg + 1]
3029 j = find_end_of_inset(document.body, i)
3031 document.warning("Malformed LyX document: Can't find end of DRS inset")
3034 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
3035 endarg = find_end_of_inset(document.body, arg)
3036 postarg3content = []
3038 argbeginPlain = find_token(
3039 document.body, "\\begin_layout Plain Layout", arg, endarg
3041 if argbeginPlain == -1:
3043 "Malformed LyX document: Can't find Argument post:3 plain Layout"
3046 argendPlain = find_end_of_inset(document.body, argbeginPlain)
3047 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
3049 # remove Arg insets and paragraph, if it only contains this inset
3051 document.body[arg - 1] == "\\begin_layout Plain Layout"
3052 and find_end_of_layout(document.body, arg - 1) == endarg + 3
3054 del document.body[arg - 1 : endarg + 4]
3056 del document.body[arg : endarg + 1]
3059 j = find_end_of_inset(document.body, i)
3061 document.warning("Malformed LyX document: Can't find end of DRS inset")
3064 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
3065 endarg = find_end_of_inset(document.body, arg)
3066 postarg4content = []
3068 argbeginPlain = find_token(
3069 document.body, "\\begin_layout Plain Layout", arg, endarg
3071 if argbeginPlain == -1:
3073 "Malformed LyX document: Can't find Argument post:4 plain Layout"
3076 argendPlain = find_end_of_inset(document.body, argbeginPlain)
3077 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
3079 # remove Arg insets and paragraph, if it only contains this inset
3081 document.body[arg - 1] == "\\begin_layout Plain Layout"
3082 and find_end_of_layout(document.body, arg - 1) == endarg + 3
3084 del document.body[arg - 1 : endarg + 4]
3086 del document.body[arg : endarg + 1]
3088 # The respective LaTeX command
# Map each inset flavor to its LaTeX command (assignment lines such as
# 'cmd = "\\sdrs"' appear elided in this excerpt).
3090 if drs == "\\begin_inset Flex DRS*":
3092 elif drs == "\\begin_inset Flex IfThen-DRS":
3094 elif drs == "\\begin_inset Flex Cond-DRS":
3096 elif drs == "\\begin_inset Flex QDRS":
3098 elif drs == "\\begin_inset Flex NegDRS":
3100 elif drs == "\\begin_inset Flex SDRS":
# Rebuild the inset as ERT: \cmd{prearg1}[{prearg2}]{body}{post...}.
3103 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
3104 endInset = find_end_of_inset(document.body, i)
3105 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
3106 precontent = put_cmd_in_ert(cmd)
3107 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
3108 if drs == "\\begin_inset Flex SDRS":
# Only \sdrs takes a second pre-group.
3109 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
3110 precontent += put_cmd_in_ert("{")
3113 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
# These commands take two trailing groups (post:1, post:2) ...
3115 put_cmd_in_ert("}{")
3117 + put_cmd_in_ert("}{")
3119 + put_cmd_in_ert("}")
3121 if cmd == "\\condrs" or cmd == "\\qdrs":
# ... and \condrs/\qdrs take a third / fourth group as well.
3122 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
3124 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
3126 postcontent = put_cmd_in_ert("}")
# Splice: close after the body, open before it, drop the inset header.
3128 document.body[endPlain : endInset + 1] = postcontent
3129 document.body[beginPlain + 1 : beginPlain] = precontent
3130 del document.body[i : beginPlain + 1]
3132 document.append_local_layout("Provides covington 1")
3133 add_to_preamble(document, ["\\usepackage{drs,covington}"])
3138 def revert_babelfont(document):
3139 "Reverts the use of \\babelfont to user preamble"
# Purpose: for non-TeX-font documents using the babel language package,
# move the roman/sans/typewriter font selections out of the header into
# \babelfont preamble commands, resetting the header fonts to "default".
# NOTE(review): embedded original line numbers are non-contiguous — early
# returns and 'if j == -1:'/else lines appear elided from this excerpt;
# verify against upstream lyx2lyx lyx_2_4.py.
3141 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
3144 i = find_token(document.header, "\\language_package", 0)
3146 document.warning("Malformed LyX document: Missing \\language_package.")
# Only babel documents use \babelfont; others keep their settings.
3148 if get_value(document.header, "\\language_package", 0) != "babel":
3151 # check font settings
# Defaults used when a font family is left unset.
3153 roman = sans = typew = "default"
3155 sf_scale = tt_scale = 100.0
3157 j = find_token(document.header, "\\font_roman", 0)
3159 document.warning("Malformed LyX document: Missing \\font_roman.")
3161 # We need to use this regex since split() does not handle quote protection
3162 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
# Token [2] is the quoted non-TeX font name; reset it to "default".
3163 roman = romanfont[2].strip('"')
3164 romanfont[2] = '"default"'
3165 document.header[j] = " ".join(romanfont)
3167 j = find_token(document.header, "\\font_sans", 0)
3169 document.warning("Malformed LyX document: Missing \\font_sans.")
3171 # We need to use this regex since split() does not handle quote protection
3172 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
3173 sans = sansfont[2].strip('"')
3174 sansfont[2] = '"default"'
3175 document.header[j] = " ".join(sansfont)
3177 j = find_token(document.header, "\\font_typewriter", 0)
3179 document.warning("Malformed LyX document: Missing \\font_typewriter.")
3181 # We need to use this regex since split() does not handle quote protection
3182 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
3183 typew = ttfont[2].strip('"')
3184 ttfont[2] = '"default"'
3185 document.header[j] = " ".join(ttfont)
3187 i = find_token(document.header, "\\font_osf", 0)
3189 document.warning("Malformed LyX document: Missing \\font_osf.")
3191 osf = str2bool(get_value(document.header, "\\font_osf", i))
# Sans/typewriter scale factors (percent); kept for Scale= options below.
3193 j = find_token(document.header, "\\font_sf_scale", 0)
3195 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
3197 sfscale = document.header[j].split()
3200 document.header[j] = " ".join(sfscale)
3203 sf_scale = float(val)
3205 document.warning("Invalid font_sf_scale value: " + val)
3207 j = find_token(document.header, "\\font_tt_scale", 0)
3209 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
3211 ttscale = document.header[j].split()
3214 document.header[j] = " ".join(ttscale)
3217 tt_scale = float(val)
3219 document.warning("Invalid font_tt_scale value: " + val)
3221 # set preamble stuff
# Emit \babelfont lines inside \AtBeginDocument (babel requires the
# language setup to be in place before fonts are assigned).
3222 pretext = ["%% This document must be processed with xelatex or lualatex!"]
3223 pretext.append("\\AtBeginDocument{%")
3224 if roman != "default":
3225 pretext.append("\\babelfont{rm}[Mapping=tex-text]{" + roman + "}")
3226 if sans != "default":
3227 sf = "\\babelfont{sf}["
3228 if sf_scale != 100.0:
3229 sf += "Scale=" + str(sf_scale / 100.0) + ","
3230 sf += "Mapping=tex-text]{" + sans + "}"
3232 if typew != "default":
3233 tw = "\\babelfont{tt}"
3234 if tt_scale != 100.0:
3235 tw += "[Scale=" + str(tt_scale / 100.0) + "]"
3236 tw += "{" + typew + "}"
3239 pretext.append("\\defaultfontfeatures{Numbers=OldStyle}")
3241 insert_to_preamble(document, pretext)
3244 def revert_minionpro(document):
3245 "Revert native MinionPro font definition (with extra options) to LaTeX"
# Purpose: when the roman font is "minionpro" and extra \font_roman_opts
# are present, emit \usepackage[<opts>]{MinionPro} to the preamble and
# reset the header roman font to "default".
# NOTE(review): embedded original line numbers skip values — early returns
# and 'if x == -1:' guards appear elided; verify against upstream.
3247 if get_bool_value(document.header, "\\use_non_tex_fonts"):
3250 regexp = re.compile(r"(\\font_roman_opts)")
3251 x = find_re(document.header, regexp, 0)
3255 # We need to use this regex since split() does not handle quote protection
3256 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3257 opts = romanopts[1].strip('"')
3259 i = find_token(document.header, "\\font_roman", 0)
3261 document.warning("Malformed LyX document: Missing \\font_roman.")
3264 # We need to use this regex since split() does not handle quote protection
3265 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3266 roman = romanfont[1].strip('"')
# Only the MinionPro selection is handled here.
3267 if roman != "minionpro":
3269 romanfont[1] = '"default"'
3270 document.header[i] = " ".join(romanfont)
# If OSF was on, fold it into the package options and switch it off.
3272 j = find_token(document.header, "\\font_osf true", 0)
3275 preamble = "\\usepackage["
3277 document.header[j] = "\\font_osf false"
3281 preamble += "]{MinionPro}"
3282 add_to_preamble(document, [preamble])
# Drop the now-reverted \font_roman_opts header line.
3283 del document.header[x]
3286 def revert_font_opts(document):
3287 "revert font options by outputting \\setxxxfont or \\babelfont to the preamble"
# Purpose: for non-TeX-font documents, move roman/sans/typewriter font
# names plus their \font_*_opts into preamble \setmainfont/\setsansfont/
# \setmonofont calls (or \babelfont{rm|sf|tt} when babel is in use).
# NOTE(review): embedded original line numbers are non-contiguous —
# 'if i != -1:'/else branches and early returns appear elided from this
# excerpt; verify against upstream lyx2lyx lyx_2_4.py.
3289 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
3290 Babel = get_value(document.header, "\\language_package") == "babel"
3293 regexp = re.compile(r"(\\font_roman_opts)")
3294 i = find_re(document.header, regexp, 0)
3296 # We need to use this regex since split() does not handle quote protection
3297 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3298 opts = romanopts[1].strip('"')
# The opts header line is removed; its content lives on in the preamble.
3299 del document.header[i]
3301 regexp = re.compile(r"(\\font_roman)")
3302 i = find_re(document.header, regexp, 0)
3304 # We need to use this regex since split() does not handle quote protection
3305 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3306 font = romanfont[2].strip('"')
3307 romanfont[2] = '"default"'
3308 document.header[i] = " ".join(romanfont)
3309 if font != "default":
# babel documents use \babelfont; plain fontspec uses \setmainfont.
3311 preamble = "\\babelfont{rm}["
3313 preamble = "\\setmainfont["
3316 preamble += "Mapping=tex-text]{"
3319 add_to_preamble(document, [preamble])
# --- sans family ---
3322 regexp = re.compile(r"(\\font_sans_opts)")
3323 i = find_re(document.header, regexp, 0)
3326 # We need to use this regex since split() does not handle quote protection
3327 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3328 opts = sfopts[1].strip('"')
3329 del document.header[i]
3331 regexp = re.compile(r"(\\font_sf_scale)")
3332 i = find_re(document.header, regexp, 0)
# Second field of \font_sf_scale is the non-TeX-fonts scale value.
3334 scaleval = get_value(document.header, "\\font_sf_scale", i).split()[1]
3335 regexp = re.compile(r"(\\font_sans)")
3336 i = find_re(document.header, regexp, 0)
3338 # We need to use this regex since split() does not handle quote protection
3339 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3340 font = sffont[2].strip('"')
3341 sffont[2] = '"default"'
3342 document.header[i] = " ".join(sffont)
3343 if font != "default":
3345 preamble = "\\babelfont{sf}["
3347 preamble = "\\setsansfont["
# Scale is emitted as "0.<percent>" (e.g. scale 85 -> Scale=0.85);
# presumably only added when the scale is not 100 — confirm upstream.
3351 preamble += "Scale=0."
3352 preamble += scaleval
3354 preamble += "Mapping=tex-text]{"
3357 add_to_preamble(document, [preamble])
# --- typewriter family ---
3360 regexp = re.compile(r"(\\font_typewriter_opts)")
3361 i = find_re(document.header, regexp, 0)
3364 # We need to use this regex since split() does not handle quote protection
3365 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3366 opts = ttopts[1].strip('"')
3367 del document.header[i]
3369 regexp = re.compile(r"(\\font_tt_scale)")
3370 i = find_re(document.header, regexp, 0)
3372 scaleval = get_value(document.header, "\\font_tt_scale", i).split()[1]
3373 regexp = re.compile(r"(\\font_typewriter)")
3374 i = find_re(document.header, regexp, 0)
3376 # We need to use this regex since split() does not handle quote protection
3377 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3378 font = ttfont[2].strip('"')
3379 ttfont[2] = '"default"'
3380 document.header[i] = " ".join(ttfont)
3381 if font != "default":
3383 preamble = "\\babelfont{tt}["
3385 preamble = "\\setmonofont["
3389 preamble += "Scale=0."
3390 preamble += scaleval
3392 preamble += "Mapping=tex-text]{"
3395 add_to_preamble(document, [preamble])
3398 def revert_plainNotoFonts_xopts(document):
3399 "Revert native (straight) Noto font definition (with extra options) to LaTeX"
# Purpose: when all three families are the plain Noto TLF fonts, revert
# them to a single \usepackage[<opts>]{noto} in the preamble.
# NOTE(review): embedded original line numbers are non-contiguous —
# returns and guard lines appear elided; verify against upstream.
3401 if get_bool_value(document.header, "\\use_non_tex_fonts"):
# OSF state feeds the package options below.
3405 y = find_token(document.header, "\\font_osf true", 0)
3409 regexp = re.compile(r"(\\font_roman_opts)")
3410 x = find_re(document.header, regexp, 0)
# Nothing to revert if there are no extra options and no OSF.
3411 if x == -1 and not osf:
3416 # We need to use this regex since split() does not handle quote protection
3417 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3418 opts = romanopts[1].strip('"')
3424 i = find_token(document.header, "\\font_roman", 0)
3428 # We need to use this regex since split() does not handle quote protection
3429 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3430 roman = romanfont[1].strip('"')
# The "plain" Noto setup requires exactly these three family values.
3431 if roman != "NotoSerif-TLF":
3434 j = find_token(document.header, "\\font_sans", 0)
3438 # We need to use this regex since split() does not handle quote protection
3439 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
3440 sf = sffont[1].strip('"')
3444 j = find_token(document.header, "\\font_typewriter", 0)
3448 # We need to use this regex since split() does not handle quote protection
3449 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
3450 tt = ttfont[1].strip('"')
3454 # So we have noto as "complete font"
3455 romanfont[1] = '"default"'
3456 document.header[i] = " ".join(romanfont)
3458 preamble = "\\usepackage["
3460 preamble += "]{noto}"
3461 add_to_preamble(document, [preamble])
# Reset OSF flag and drop the reverted opts line from the header.
3463 document.header[y] = "\\font_osf false"
3465 del document.header[x]
3468 def revert_notoFonts_xopts(document):
3469 "Revert native (extended) Noto font definition (with extra options) to LaTeX"
# Delegates to the font-mapping machinery (createFontMapping/revert_fonts)
# and writes the collected packages+options to the preamble.
# NOTE(review): 'fontmap = dict()' and the early return appear elided
# here (embedded line numbers skip values); verify against upstream.
3471 if get_bool_value(document.header, "\\use_non_tex_fonts"):
3475 fm = createFontMapping(["Noto"])
3476 if revert_fonts(document, fm, fontmap, True):
3477 add_preamble_fonts(document, fontmap)
3480 def revert_IBMFonts_xopts(document):
3481 "Revert native IBM font definition (with extra options) to LaTeX"
# Same pattern as revert_notoFonts_xopts, for the IBM Plex families.
# NOTE(review): 'fontmap = dict()' and the early return appear elided
# here (embedded line numbers skip values); verify against upstream.
3483 if get_bool_value(document.header, "\\use_non_tex_fonts"):
3487 fm = createFontMapping(["IBM"])
3488 if revert_fonts(document, fm, fontmap, True):
3489 add_preamble_fonts(document, fontmap)
3492 def revert_AdobeFonts_xopts(document):
3493 "Revert native Adobe font definition (with extra options) to LaTeX"
# Same pattern as revert_notoFonts_xopts, for the Adobe Source families.
# NOTE(review): 'fontmap = dict()' and the early return appear elided
# here (embedded line numbers skip values); verify against upstream.
3495 if get_bool_value(document.header, "\\use_non_tex_fonts"):
3499 fm = createFontMapping(["Adobe"])
3500 if revert_fonts(document, fm, fontmap, True):
3501 add_preamble_fonts(document, fontmap)
3504 def convert_osf(document):
3505 "Convert \\font_osf param to new format"
# Purpose: split the single \font_osf header flag into the newer
# per-family flags \font_roman_osf / \font_sans_osf / \font_typewriter_osf.
# NOTE(review): embedded original line numbers skip values — returns,
# list headers (e.g. the osffonts list this excerpt shows entries of)
# and else-branches appear elided; verify against upstream.
3507 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
3509 i = find_token(document.header, "\\font_osf", 0)
3511 document.warning("Malformed LyX document: Missing \\font_osf.")
# Entries of a sans-font OSF-capable list (list header elided here).
3516 "ADOBESourceSansPro",
3521 "NotoSansExtralight",
# Typewriter fonts with an OSF variant.
3523 osftt = ["ADOBESourceCodePro", "NotoMonoRegular"]
3525 osfval = str2bool(get_value(document.header, "\\font_osf", i))
# Rename the old flag in place to the roman-family flag.
3526 document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
3529 document.header.insert(i, "\\font_sans_osf false")
3530 document.header.insert(i + 1, "\\font_typewriter_osf false")
# For TeX fonts, per-family OSF depends on whether the selected sans/tt
# font actually supports old-style figures.
3534 x = find_token(document.header, "\\font_sans", 0)
3536 document.warning("Malformed LyX document: Missing \\font_sans.")
3538 # We need to use this regex since split() does not handle quote protection
3539 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3540 sf = sffont[1].strip('"')
3542 document.header.insert(i, "\\font_sans_osf true")
3544 document.header.insert(i, "\\font_sans_osf false")
3546 x = find_token(document.header, "\\font_typewriter", 0)
3548 document.warning("Malformed LyX document: Missing \\font_typewriter.")
3550 # We need to use this regex since split() does not handle quote protection
3551 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3552 tt = ttfont[1].strip('"')
3554 document.header.insert(i + 1, "\\font_typewriter_osf true")
3556 document.header.insert(i + 1, "\\font_typewriter_osf false")
# Fallback branch: both per-family flags default to false.
3559 document.header.insert(i, "\\font_sans_osf false")
3560 document.header.insert(i + 1, "\\font_typewriter_osf false")
3563 def revert_osf(document):
3564 "Revert \\font_*_osf params"
# Purpose: collapse the per-family OSF flags back into the single legacy
# \font_osf flag (inverse of convert_osf).
# NOTE(review): embedded original line numbers skip values — returns and
# conditional context lines appear elided; verify against upstream.
3566 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
3568 i = find_token(document.header, "\\font_roman_osf", 0)
3570 document.warning("Malformed LyX document: Missing \\font_roman_osf.")
3573 osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
# The roman flag line is renamed in place to the legacy name.
3574 document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
3576 i = find_token(document.header, "\\font_sans_osf", 0)
3578 document.warning("Malformed LyX document: Missing \\font_sans_osf.")
3581 osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
3582 del document.header[i]
3584 i = find_token(document.header, "\\font_typewriter_osf", 0)
3586 document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
# Any family with OSF enabled turns the legacy flag on.
3589 osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
3590 del document.header[i]
3593 i = find_token(document.header, "\\font_osf", 0)
3595 document.warning("Malformed LyX document: Missing \\font_osf.")
3597 document.header[i] = "\\font_osf true"
3600 def revert_texfontopts(document):
3601 "Revert native TeX font definitions (with extra options) to LaTeX"
# Purpose: for TeX-font documents, revert sans (biolinum) and roman
# (utopia/palatino/times/xcharter/... — the rmfonts list header is elided
# in this excerpt) selections carrying \font_*_opts into explicit
# \usepackage[...] preamble lines.
# NOTE(review): embedded original line numbers skip values — returns,
# 'if x == -1:' guards and assignment lines (e.g. osf strings) appear
# elided from this excerpt; verify against upstream lyx2lyx lyx_2_4.py.
3603 if get_bool_value(document.header, "\\use_non_tex_fonts"):
3618 # First the sf (biolinum only)
3619 regexp = re.compile(r"(\\font_sans_opts)")
3620 x = find_re(document.header, regexp, 0)
3622 # We need to use this regex since split() does not handle quote protection
3623 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3624 opts = sfopts[1].strip('"')
3625 i = find_token(document.header, "\\font_sans", 0)
3627 document.warning("Malformed LyX document: Missing \\font_sans.")
3629 # We need to use this regex since split() does not handle quote protection
3630 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3631 sans = sffont[1].strip('"')
3632 if sans == "biolinum":
3634 sffont[1] = '"default"'
3635 document.header[i] = " ".join(sffont)
3637 j = find_token(document.header, "\\font_sans_osf true", 0)
3640 k = find_token(document.header, "\\font_sf_scale", 0)
3642 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
3644 sfscale = document.header[k].split()
3647 document.header[k] = " ".join(sfscale)
3650 sf_scale = float(val)
3652 document.warning("Invalid font_sf_scale value: " + val)
# Assemble \usepackage[osf,scaled=...,<opts>]{biolinum}.
3653 preamble = "\\usepackage["
3655 document.header[j] = "\\font_sans_osf false"
3657 if sf_scale != 100.0:
3658 preamble += "scaled=" + str(sf_scale / 100.0) + ","
3660 preamble += "]{biolinum}"
3661 add_to_preamble(document, [preamble])
3662 del document.header[x]
# --- roman family ---
3664 regexp = re.compile(r"(\\font_roman_opts)")
3665 x = find_re(document.header, regexp, 0)
3669 # We need to use this regex since split() does not handle quote protection
3670 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3671 opts = romanopts[1].strip('"')
3673 i = find_token(document.header, "\\font_roman", 0)
3675 document.warning("Malformed LyX document: Missing \\font_roman.")
3678 # We need to use this regex since split() does not handle quote protection
3679 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3680 roman = romanfont[1].strip('"')
3681 if not roman in rmfonts:
3683 romanfont[1] = '"default"'
3684 document.header[i] = " ".join(romanfont)
# Map font name to its LaTeX package where they differ.
3686 if roman == "utopia":
3688 elif roman == "palatino":
3689 package = "mathpazo"
3690 elif roman == "times":
3691 package = "mathptmx"
3692 elif roman == "xcharter":
3693 package = "XCharter"
# Per-font OSF option strings (assignments largely elided here).
3695 j = find_token(document.header, "\\font_roman_osf true", 0)
3697 if roman == "cochineal":
3698 osf = "proportional,osf,"
3699 elif roman == "utopia":
3701 elif roman == "garamondx":
3703 elif roman == "libertine":
3705 elif roman == "palatino":
3707 elif roman == "xcharter":
3709 document.header[j] = "\\font_roman_osf false"
# Small-caps flag is folded into the package options for fonts that
# express it as an option, then reset.
3710 k = find_token(document.header, "\\font_sc true", 0)
3712 if roman == "utopia":
3714 if roman == "palatino" and osf == "":
3716 document.header[k] = "\\font_sc false"
3717 preamble = "\\usepackage["
3720 preamble += "]{" + package + "}"
3721 add_to_preamble(document, [preamble])
3722 del document.header[x]
3725 def convert_CantarellFont(document):
3726 "Handle Cantarell font definition to LaTeX"
# TeX-font documents only; "oldstyle" is passed to convert_fonts as the
# OSF-related option name — presumably the cantarell package option;
# confirm in createFontMapping.
3728 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
3729 fm = createFontMapping(["Cantarell"])
3730 convert_fonts(document, fm, "oldstyle")
3733 def revert_CantarellFont(document):
3734 "Revert native Cantarell font definition to LaTeX"
# NOTE(review): 'fontmap = dict()' appears elided here (embedded line
# numbers skip a value); verify against upstream.
3736 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
3738 fm = createFontMapping(["Cantarell"])
3739 if revert_fonts(document, fm, fontmap, False, True):
3740 add_preamble_fonts(document, fontmap)
3743 def convert_ChivoFont(document):
3744 "Handle Chivo font definition to LaTeX"
# Same pattern as convert_CantarellFont, for the Chivo family.
3746 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
3747 fm = createFontMapping(["Chivo"])
3748 convert_fonts(document, fm, "oldstyle")
3751 def revert_ChivoFont(document):
3752 "Revert native Chivo font definition to LaTeX"
# NOTE(review): 'fontmap = dict()' appears elided here (embedded line
# numbers skip a value); verify against upstream.
3754 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
3756 fm = createFontMapping(["Chivo"])
3757 if revert_fonts(document, fm, fontmap, False, True):
3758 add_preamble_fonts(document, fontmap)
3761 def convert_FiraFont(document):
3762 "Handle Fira font definition to LaTeX"
# Same pattern as convert_CantarellFont, but Fira's figure option is
# "lf" (lining figures) rather than "oldstyle".
3764 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
3765 fm = createFontMapping(["Fira"])
3766 convert_fonts(document, fm, "lf")
3769 def revert_FiraFont(document):
3770 "Revert native Fira font definition to LaTeX"
# NOTE(review): 'fontmap = dict()' appears elided here (embedded line
# numbers skip a value); verify against upstream.
3772 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
3774 fm = createFontMapping(["Fira"])
3775 if revert_fonts(document, fm, fontmap, False, True):
3776 add_preamble_fonts(document, fontmap)
3779 def convert_Semibolds(document):
3780 "Move semibold options to extraopts"
# Purpose: replace the pseudo-fonts IBMPlexSerifSemibold / IBMPlexSansSemibold /
# IBMPlexMonoSemibold with the base family name plus a "semibold" entry in
# the corresponding \font_*_opts header line (TeX fonts only for the opts).
# NOTE(review): embedded original line numbers skip values — returns,
# 'if x == -1:'/else lines appear elided; verify against upstream.
3782 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
3784 i = find_token(document.header, "\\font_roman", 0)
3786 document.warning("Malformed LyX document: Missing \\font_roman.")
3788 # We need to use this regex since split() does not handle quote protection
3789 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3790 roman = romanfont[1].strip('"')
3791 if roman == "IBMPlexSerifSemibold":
3792 romanfont[1] = '"IBMPlexSerif"'
3793 document.header[i] = " ".join(romanfont)
3795 if NonTeXFonts == False:
3796 regexp = re.compile(r"(\\font_roman_opts)")
3797 x = find_re(document.header, regexp, 0)
3799 # Sensible place to insert tag
# No opts line yet: create one just before \font_sf_scale.
3800 fo = find_token(document.header, "\\font_sf_scale")
3802 document.warning("Malformed LyX document! Missing \\font_sf_scale")
3804 document.header.insert(fo, '\\font_roman_opts "semibold"')
3806 # We need to use this regex since split() does not handle quote protection
# Opts line exists: prepend "semibold, " to the existing options.
3807 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3808 document.header[x] = (
3809 '\\font_roman_opts "semibold, ' + romanopts[1].strip('"') + '"'
# --- sans family, same treatment ---
3812 i = find_token(document.header, "\\font_sans", 0)
3814 document.warning("Malformed LyX document: Missing \\font_sans.")
3816 # We need to use this regex since split() does not handle quote protection
3817 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3818 sf = sffont[1].strip('"')
3819 if sf == "IBMPlexSansSemibold":
3820 sffont[1] = '"IBMPlexSans"'
3821 document.header[i] = " ".join(sffont)
3823 if NonTeXFonts == False:
3824 regexp = re.compile(r"(\\font_sans_opts)")
3825 x = find_re(document.header, regexp, 0)
3827 # Sensible place to insert tag
3828 fo = find_token(document.header, "\\font_sf_scale")
3830 document.warning("Malformed LyX document! Missing \\font_sf_scale")
3832 document.header.insert(fo, '\\font_sans_opts "semibold"')
3834 # We need to use this regex since split() does not handle quote protection
3835 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3836 document.header[x] = (
3837 '\\font_sans_opts "semibold, ' + sfopts[1].strip('"') + '"'
# --- typewriter family, same treatment (anchored at \font_tt_scale) ---
3840 i = find_token(document.header, "\\font_typewriter", 0)
3842 document.warning("Malformed LyX document: Missing \\font_typewriter.")
3844 # We need to use this regex since split() does not handle quote protection
3845 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3846 tt = ttfont[1].strip('"')
3847 if tt == "IBMPlexMonoSemibold":
3848 ttfont[1] = '"IBMPlexMono"'
3849 document.header[i] = " ".join(ttfont)
3851 if NonTeXFonts == False:
3852 regexp = re.compile(r"(\\font_typewriter_opts)")
3853 x = find_re(document.header, regexp, 0)
3855 # Sensible place to insert tag
3856 fo = find_token(document.header, "\\font_tt_scale")
3858 document.warning("Malformed LyX document! Missing \\font_tt_scale")
3860 document.header.insert(fo, '\\font_typewriter_opts "semibold"')
3862 # We need to use this regex since split() does not handle quote protection
3863 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3864 document.header[x] = (
3865 '\\font_typewriter_opts "semibold, ' + ttopts[1].strip('"') + '"'
def convert_NotoRegulars(document):
    """Merge diverse Noto regular fonts.

    Rewrites the LyX 2.3 font names NotoSerif-TLF / NotoSans-TLF /
    NotoMono-TLF in the \\font_roman, \\font_sans and \\font_typewriter
    headers to the merged 2.4 names (NotoSerifRegular etc.).
    """
    # NOTE(review): the `if`/`else` control lines were elided in the extracted
    # listing; reconstructed here following the sibling font converters.
    i = find_token(document.header, "\\font_roman", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_roman.")
    else:
        # We need to use this regex since split() does not handle quote protection
        romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        roman = romanfont[1].strip('"')
        if roman == "NotoSerif-TLF":
            romanfont[1] = '"NotoSerifRegular"'
            document.header[i] = " ".join(romanfont)

    i = find_token(document.header, "\\font_sans", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_sans.")
    else:
        # We need to use this regex since split() does not handle quote protection
        sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        sf = sffont[1].strip('"')
        if sf == "NotoSans-TLF":
            sffont[1] = '"NotoSansRegular"'
            document.header[i] = " ".join(sffont)

    i = find_token(document.header, "\\font_typewriter", 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\font_typewriter.")
    else:
        # We need to use this regex since split() does not handle quote protection
        ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        tt = ttfont[1].strip('"')
        if tt == "NotoMono-TLF":
            ttfont[1] = '"NotoMonoRegular"'
            document.header[i] = " ".join(ttfont)
def convert_CrimsonProFont(document):
    """Convert CrimsonPro LaTeX font definition to native font header.

    Only applies when the document uses TeX fonts; with non-TeX
    (fontspec) fonts there is nothing to convert.
    """
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(["CrimsonPro"])
        # "lf": presumably the lining-figures variant — confirm against
        # createFontMapping's CrimsonPro entry.
        convert_fonts(document, fm, "lf")
def revert_CrimsonProFont(document):
    """Revert native CrimsonPro font definition to LaTeX preamble code."""
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # fontmap collects package/option pairs for add_preamble_fonts
        # (the initializer line was elided in the extracted listing).
        fontmap = dict()
        fm = createFontMapping(["CrimsonPro"])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def revert_pagesizes(document):
    """Revert new page sizes in memoir and KOMA to class options.

    Page sizes that LyX 2.3 does not know are moved from the \\papersize
    header into \\options so the exported LaTeX stays correct.
    """
    # BUGFIX: the listing had `document.textclass[:2] != "scr"`, which can
    # never equal the 3-char "scr" and so mis-gates KOMA classes; the
    # sibling convert_pagesizes uses [:3].
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    # With geometry the page size is handled elsewhere.
    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    # Sizes LyX 2.3 already supports — nothing to revert for these.
    # NOTE(review): this list was elided in the extracted listing;
    # verify membership against upstream lyx_2_4.py.
    defsizes = [
        "default",
        "custom",
        "letterpaper",
        "legalpaper",
        "executivepaper",
        "a4paper",
        "a5paper",
        "b5paper",
    ]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    document.header[i] = "\\papersize default"

    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + val)
        return
    document.header[i] = document.header[i] + "," + val
def convert_pagesizes(document):
    """Convert to new page sizes in memoir and KOMA classes.

    If a non-default \\papersize is used without geometry, enable
    geometry so LyX 2.4 keeps producing the same page layout.
    """
    if document.textclass != "memoir" and document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\use_geometry true", 0)
    if i != -1:
        return

    # Sizes that need no conversion.
    # NOTE(review): list elided in the extracted listing; verify against
    # upstream lyx_2_4.py.
    defsizes = [
        "default",
        "custom",
        "letterpaper",
        "legalpaper",
        "executivepaper",
        "a4paper",
        "a5paper",
        "b5paper",
    ]

    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    val = get_value(document.header, "\\papersize", i)
    if val in defsizes:
        # nothing to do
        return

    i = find_token(document.header, "\\use_geometry false", 0)
    if i != -1:
        # Maintain use of geometry.
        # BUGFIX: the listing wrote to document.header[1] (a fixed index)
        # instead of the located \use_geometry line.
        document.header[i] = "\\use_geometry true"
def revert_komafontsizes(document):
    """Revert new font sizes in KOMA classes to the fontsize= option."""
    if document.textclass[:3] != "scr":
        return

    i = find_token(document.header, "\\paperfontsize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\paperfontsize header.")
        return

    # Sizes LyX 2.3 supports natively — leave them alone.
    defsizes = ["default", "10", "11", "12"]

    val = get_value(document.header, "\\paperfontsize", i)
    if val in defsizes:
        # nothing to do
        return

    document.header[i] = "\\paperfontsize default"

    fsize = "fontsize=" + val

    i = find_token(document.header, "\\options", 0)
    if i == -1:
        i = find_token(document.header, "\\textclass", 0)
        if i == -1:
            document.warning("Malformed LyX document! Missing \\textclass header.")
            return
        document.header.insert(i, "\\options " + fsize)
        return
    document.header[i] = document.header[i] + "," + fsize
# NOTE(review): this listing is an excerpt with the original file's line
# numbers embedded and many control-flow lines (if/else/loop headers) elided
# — the numbering gaps mark the missing lines. The pre/postnote bookkeeping
# below is too order-sensitive to reconstruct safely; left byte-identical.
# Recover the full body from upstream lyx2lyx/lyx_2_4.py.
4036 def revert_dupqualicites(document):
4037 "Revert qualified citation list commands with duplicate keys to ERT"
4039 # LyX 2.3 only supports qualified citation lists with unique keys. Thus,
4040 # we need to revert those with multiple uses of the same key.
# Determine the citation engine; only biblatex(-natbib) has these commands.
4044 i = find_token(document.header, "\\cite_engine", 0)
4046 document.warning("Malformed document! Missing \\cite_engine")
4048 engine = get_value(document.header, "\\cite_engine", i)
4050 if not engine in ["biblatex", "biblatex-natbib"]:
4053 # Citation insets that support qualified lists, with their LaTeX code
# (dict literal; opening lines elided — presumably also "cite"/"Cite")
4057 "citet": "textcites",
4058 "Citet": "Textcites",
4059 "citep": "parencites",
4060 "Citep": "Parencites",
4061 "Footcite": "Smartcites",
4062 "footcite": "smartcites",
4063 "Autocite": "Autocites",
4064 "autocite": "autocites",
# Scan the body for citation insets (loop header elided).
4069 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
4072 j = find_end_of_inset(document.body, i)
4074 document.warning("Can't find end of citation inset at line %d!!" % (i))
4078 k = find_token(document.body, "LatexCommand", i, j)
4080 document.warning("Can't find LatexCommand for citation inset at line %d!" % (i))
4084 cmd = get_value(document.body, "LatexCommand", k)
4085 if not cmd in list(ql_citations.keys()):
# Only insets that actually carry qualified lists need reverting.
4089 pres = find_token(document.body, "pretextlist", i, j)
4090 posts = find_token(document.body, "posttextlist", i, j)
4091 if pres == -1 and posts == -1:
4096 key = get_quoted_value(document.body, "key", i, j)
4098 document.warning("Citation inset at line %d does not have a key!" % (i))
# Unique keys are fine in LyX 2.3; only duplicates need ERT.
4102 keys = key.split(",")
4103 ukeys = list(set(keys))
4104 if len(keys) == len(ukeys):
4109 pretexts = get_quoted_value(document.body, "pretextlist", pres)
4110 posttexts = get_quoted_value(document.body, "posttextlist", posts)
4112 pre = get_quoted_value(document.body, "before", i, j)
4113 post = get_quoted_value(document.body, "after", i, j)
# Build key -> tab-joined prenote list (loop headers elided).
4114 prelist = pretexts.split("\t")
4117 ppp = pp.split(" ", 1)
4123 if ppp[0] in premap:
4124 premap[ppp[0]] = premap[ppp[0]] + "\t" + val
4126 premap[ppp[0]] = val
4127 postlist = posttexts.split("\t")
4130 ppp = pp.split(" ", 1)
4136 if ppp[0] in postmap:
4137 postmap[ppp[0]] = postmap[ppp[0]] + "\t" + val
4139 postmap[ppp[0]] = val
4140 # Replace known new commands with ERT
# Parens in global pre/postnotes must be braced to survive as one argument.
4141 if "(" in pre or ")" in pre:
4142 pre = "{" + pre + "}"
4143 if "(" in post or ")" in post:
4144 post = "{" + post + "}"
4145 res = "\\" + ql_citations[cmd]
4147 res += "(" + pre + ")"
4149 res += "(" + post + ")"
# Per-key: emit consumed pre/postnotes as [..] then the key as {..}.
4153 if premap.get(kk, "") != "":
4154 akeys = premap[kk].split("\t", 1)
4157 res += "[" + akey + "]"
4159 premap[kk] = "\t".join(akeys[1:])
4162 if postmap.get(kk, "") != "":
4163 akeys = postmap[kk].split("\t", 1)
4166 res += "[" + akey + "]"
4168 postmap[kk] = "\t".join(akeys[1:])
4171 elif premap.get(kk, "") != "":
4173 res += "{" + kk + "}"
4174 document.body[i : j + 1] = put_cmd_in_ert([res])
def convert_pagesizenames(document):
    """Convert LyX page size names: drop the "paper" suffix (a4paper -> a4)."""
    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    # Old (LyX 2.3) names carrying the "paper" suffix.
    oldnames = [
        "letterpaper",
        "legalpaper",
        "executivepaper",
        "a0paper",
        "a1paper",
        "a2paper",
        "a3paper",
        "a4paper",
        "a5paper",
        "a6paper",
        "b0paper",
        "b1paper",
        "b2paper",
        "b3paper",
        "b4paper",
        "b5paper",
        "b6paper",
        "c0paper",
        "c1paper",
        "c2paper",
        "c3paper",
        "c4paper",
        "c5paper",
        "c6paper",
    ]
    val = get_value(document.header, "\\papersize", i)
    if val in oldnames:
        newval = val.replace("paper", "")
        document.header[i] = "\\papersize " + newval
def revert_pagesizenames(document):
    """Revert LyX page size names: re-append the "paper" suffix (a4 -> a4paper)."""
    i = find_token(document.header, "\\papersize", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\papersize header.")
        return
    # New (LyX 2.4) short names that map back to *paper names.
    newnames = [
        "letter",
        "legal",
        "executive",
        "a0",
        "a1",
        "a2",
        "a3",
        "a4",
        "a5",
        "a6",
        "b0",
        "b1",
        "b2",
        "b3",
        "b4",
        "b5",
        "b6",
        "c0",
        "c1",
        "c2",
        "c3",
        "c4",
        "c5",
        "c6",
    ]
    val = get_value(document.header, "\\papersize", i)
    if val in newnames:
        newval = val + "paper"
        document.header[i] = "\\papersize " + newval
def revert_theendnotes(document):
    """Revert native support of \\theendnotes to TeX-code."""
    if (
        not "endnotes" in document.get_module_list()
        and not "foottoend" in document.get_module_list()
    ):
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        document.body[i : j + 1] = put_cmd_in_ert("\\theendnotes")
def revert_enotez(document):
    """Revert native support of the enotez package to TeX-code."""
    if (
        not "enotez" in document.get_module_list()
        and not "foottoenotez" in document.get_module_list()
    ):
        return

    # Track whether enotez commands are actually used, so the package is
    # only loaded when needed.
    use = False
    if find_token(document.body, "\\begin_inset Flex Endnote", 0) != -1:
        use = True

    revert_flex_inset(document.body, "Endnote", "\\endnote")

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        use = True
        document.body[i : j + 1] = put_cmd_in_ert("\\printendnotes")

    if use:
        add_to_preamble(document, ["\\usepackage{enotez}"])
    document.del_module("enotez")
    document.del_module("foottoenotez")
def revert_memoir_endnotes(document):
    """Revert native support of memoir endnotes to TeX-code."""
    if document.textclass != "memoir":
        return

    # If an endnote module is active, its \endnote command wins over
    # memoir's own \pagenote.
    encommand = "\\pagenote"
    modules = document.get_module_list()
    if (
        "enotez" in modules
        or "foottoenotez" in modules
        or "endnotes" in modules
        or "foottoend" in modules
    ):
        encommand = "\\endnote"

    revert_flex_inset(document.body, "Endnote", encommand)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset FloatList pagenote", i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of FloatList inset")
            continue

        if document.body[i] == "\\begin_inset FloatList pagenote*":
            document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes*")
        else:
            document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes")
        add_to_preamble(document, ["\\makepagenote"])
# NOTE(review): excerpt with original line numbers embedded and several
# control-flow lines elided (see numbering gaps). The interplay between the
# `special` option string and the `height` parameter is too order-sensitive
# to reconstruct safely; left byte-identical. Recover from upstream
# lyx2lyx/lyx_2_4.py.
4346 def revert_totalheight(document):
4347 "Reverts graphics height parameter from totalheight to height"
# Map absolute LaTeX lengths to LyX's relative-percentage units.
4349 relative_heights = {
4350 "\\textwidth": "text%",
4351 "\\columnwidth": "col%",
4352 "\\paperwidth": "page%",
4353 "\\linewidth": "line%",
4354 "\\textheight": "theight%",
4355 "\\paperheight": "pheight%",
4356 "\\baselineskip ": "baselineskip%",
# Scan all graphics insets (loop header elided).
4360 i = find_token(document.body, "\\begin_inset Graphics", i)
4363 j = find_end_of_inset(document.body, i)
4365 document.warning("Can't find end of graphics inset at line %d!!" % (i))
# Pull a height= entry out of the inset's "special" options, converting
# relative units back to LyX form on the way.
4369 rx = re.compile(r"\s*special\s*(\S+)$")
4370 rxx = re.compile(r"(\d*\.*\d+)(\S+)$")
4371 k = find_re(document.body, rx, i, j)
4375 m = rx.match(document.body[k])
4377 special = m.group(1)
4378 mspecial = special.split(",")
4379 for spc in mspecial:
4380 if spc.startswith("height="):
4381 oldheight = spc.split("=")[1]
4382 ms = rxx.search(oldheight)
4384 oldunit = ms.group(2)
4385 if oldunit in list(relative_heights.keys()):
4386 oldval = str(float(ms.group(1)) * 100)
4387 oldunit = relative_heights[oldunit]
4388 oldheight = oldval + oldunit
4389 mspecial.remove(spc)
4391 if len(mspecial) > 0:
4392 special = ",".join(mspecial)
# Move the native height parameter into special totalheight=, and restore
# any extracted height= as the native height line.
4396 rx = re.compile(r"(\s*height\s*)(\S+)$")
4397 kk = find_re(document.body, rx, i, j)
4399 m = rx.match(document.body[kk])
4405 val = val + "," + special
4406 document.body[k] = "\tspecial " + "totalheight=" + val
4408 document.body.insert(kk, "\tspecial totalheight=" + val)
4410 document.body[kk] = m.group(1) + oldheight
4412 del document.body[kk]
4413 elif oldheight != "":
4415 document.body[k] = "\tspecial " + special
4416 document.body.insert(k, "\theight " + oldheight)
4418 document.body[k] = "\theight " + oldheight
# NOTE(review): excerpt with original line numbers embedded and several
# control-flow lines elided (see numbering gaps). Inverse of
# revert_totalheight; left byte-identical for the same reason. Recover from
# upstream lyx2lyx/lyx_2_4.py.
4422 def convert_totalheight(document):
4423 "Converts graphics height parameter from totalheight to height"
# Map LyX relative-percentage units to absolute LaTeX lengths.
4425 relative_heights = {
4426 "text%": "\\textwidth",
4427 "col%": "\\columnwidth",
4428 "page%": "\\paperwidth",
4429 "line%": "\\linewidth",
4430 "theight%": "\\textheight",
4431 "pheight%": "\\paperheight",
4432 "baselineskip%": "\\baselineskip",
# Scan all graphics insets (loop header elided).
4436 i = find_token(document.body, "\\begin_inset Graphics", i)
4439 j = find_end_of_inset(document.body, i)
4441 document.warning("Can't find end of graphics inset at line %d!!" % (i))
# Extract a totalheight= entry from the "special" options, if any.
4445 rx = re.compile(r"\s*special\s*(\S+)$")
4446 k = find_re(document.body, rx, i, j)
4450 m = rx.match(document.body[k])
4452 special = m.group(1)
4453 mspecial = special.split(",")
4454 for spc in mspecial:
4455 if spc[:12] == "totalheight=":
4456 newheight = spc.split("=")[1]
4457 mspecial.remove(spc)
4459 if len(mspecial) > 0:
4460 special = ",".join(mspecial)
# Move native height into special height= (converting relative units),
# and promote the extracted totalheight to the native height line.
4464 rx = re.compile(r"(\s*height\s*)(\d+\.?\d*)(\S+)$")
4465 kk = find_re(document.body, rx, i, j)
4467 m = rx.match(document.body[kk])
4472 if unit in list(relative_heights.keys()):
4473 val = str(float(val) / 100)
4474 unit = relative_heights[unit]
4477 val = val + unit + "," + special
4478 document.body[k] = "\tspecial " + "height=" + val
4480 document.body.insert(kk + 1, "\tspecial height=" + val + unit)
4482 document.body[kk] = m.group(1) + newheight
4484 del document.body[kk]
4485 elif newheight != "":
4486 document.body.insert(k, "\theight " + newheight)
def convert_changebars(document):
    """Convert the changebars module to the native \\change_bars param."""
    if not "changebars" in document.get_module_list():
        return

    i = find_token(document.header, "\\output_changes", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\output_changes header.")
        # Still drop the module so the document stays loadable.
        document.del_module("changebars")
        return

    document.header.insert(i, "\\change_bars true")
    document.del_module("changebars")
def revert_changebars(document):
    """Convert the native \\change_bars param back to the changebars module."""
    i = find_token(document.header, "\\change_bars", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\change_bars header.")
        return

    val = get_value(document.header, "\\change_bars", i)
    if val == "true":
        document.add_module("changebars")

    del document.header[i]
def convert_postpone_fragile(document):
    """Add the \\postpone_fragile_content buffer param (as false)."""
    i = find_token(document.header, "\\output_changes", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\output_changes header.")
        return

    # Set this to false for old documents (see #2154)
    document.header.insert(i, "\\postpone_fragile_content false")
def revert_postpone_fragile(document):
    """Remove the \\postpone_fragile_content buffer param."""
    i = find_token(document.header, "\\postpone_fragile_content", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\postpone_fragile_content.")
        return

    del document.header[i]
def revert_colrow_tracking(document):
    """Remove the change tag from tabular <column> and <row> elements."""
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Tabular", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i + 1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of tabular.")
            continue
        for k in range(i, j):
            # Strip change="..." attributes from column and row tags.
            m = re.search('^<column.*change="([^"]+)".*>$', document.body[k])
            if m:
                document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', "")
            m = re.search('^<row.*change="([^"]+)".*>$', document.body[k])
            if m:
                document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', "")
def convert_counter_maintenance(document):
    """Convert \\maintain_unincluded_children from boolean to tristate."""
    i = find_token(document.header, "\\maintain_unincluded_children", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
        return

    val = get_value(document.header, "\\maintain_unincluded_children", i)

    # true -> strict, false -> no
    if val == "true":
        document.header[i] = "\\maintain_unincluded_children strict"
    else:
        document.header[i] = "\\maintain_unincluded_children no"
def revert_counter_maintenance(document):
    """Revert \\maintain_unincluded_children to the old boolean values."""
    i = find_token(document.header, "\\maintain_unincluded_children", 0)
    if i == -1:
        document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
        return

    val = get_value(document.header, "\\maintain_unincluded_children", i)

    # no -> false; everything else (strict/mostly) -> true
    if val == "no":
        document.header[i] = "\\maintain_unincluded_children false"
    else:
        document.header[i] = "\\maintain_unincluded_children true"
def revert_counter_inset(document):
    """Revert counter insets to ERT, where possible.

    set/addto/reset map directly to \\setcounter / \\addtocounter;
    save/restore are emulated via auxiliary LyXSave<counter> counters
    that are declared in the preamble.
    """
    # NOTE(review): loop headers and several branch lines were elided in the
    # extracted listing; reconstructed. Verify against upstream lyx_2_4.py.
    needed_counters = {}
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset counter", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of counter inset at line %d!" % i)
            i += 1
            continue
        lyx = get_quoted_value(document.body, "lyxonly", i, j)
        if lyx == "true":
            # there is nothing we can do to affect the LyX counters
            document.body[i : j + 1] = []
            continue
        cnt = get_quoted_value(document.body, "counter", i, j)
        if not cnt:
            document.warning("No counter given for inset at line %d!" % i)
            i = j + 1
            continue

        cmd = get_quoted_value(document.body, "LatexCommand", i, j)
        document.warning(cmd)
        if cmd == "set":
            val = get_quoted_value(document.body, "value", i, j)
            if not val:
                document.warning("Can't convert counter inset at line %d!" % i)
                # advance past this inset so the scan cannot loop forever
                i = j + 1
                continue
            ert = put_cmd_in_ert(f"\\setcounter{{{cnt}}}{{{val}}}")
        elif cmd == "addto":
            val = get_quoted_value(document.body, "value", i, j)
            if not val:
                document.warning("Can't convert counter inset at line %d!" % i)
                i = j + 1
                continue
            ert = put_cmd_in_ert(f"\\addtocounter{{{cnt}}}{{{val}}}")
        elif cmd == "reset":
            ert = put_cmd_in_ert("\\setcounter{%s}{0}" % (cnt))
        elif cmd == "save":
            needed_counters[cnt] = 1
            savecnt = "LyXSave" + cnt
            ert = put_cmd_in_ert(f"\\setcounter{{{savecnt}}}{{\\value{{{cnt}}}}}")
        elif cmd == "restore":
            needed_counters[cnt] = 1
            savecnt = "LyXSave" + cnt
            ert = put_cmd_in_ert(f"\\setcounter{{{cnt}}}{{\\value{{{savecnt}}}}}")
        else:
            document.warning("Unknown counter command `%s' in inset at line %d!" % (cnt, i))
            i = j + 1
            continue

        document.body[i : j + 1] = ert
        i = j + 1

    # Declare the auxiliary save counters used above.
    pretext = []
    for cnt in needed_counters:
        pretext.append("\\newcounter{LyXSave%s}" % (cnt))
    if pretext:
        add_to_preamble(document, pretext)
def revert_ams_spaces(document):
    """Revert InsetSpace medspace and thickspace into their TeX-code counterparts."""
    found = False
    insets = ["\\medspace{}", "\\thickspace{}"]
    for inset in insets:
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset space " + inset, i)
            if i == -1:
                break
            end = find_end_of_inset(document.body, i)
            subst = put_cmd_in_ert(inset)
            document.body[i : end + 1] = subst
            found = True

    if found:
        # load amsmath in the preamble if not already loaded
        i = find_token(document.header, "\\use_package amsmath 2", 0)
        if i == -1:
            add_to_preamble(document, ["\\@ifundefined{thickspace}{\\usepackage{amsmath}}{}"])
def convert_parskip(document):
    """Move old parskip settings to the preamble.

    Translates \\paragraph_separation skip + \\defskip into explicit
    \\parskip/\\parindent preamble code and resets the headers to the
    LyX 2.4 defaults.
    """
    i = find_token(document.header, "\\paragraph_separation skip", 0)
    if i == -1:
        return

    j = find_token(document.header, "\\defskip", 0)
    if j == -1:
        document.warning("Malformed LyX document! Missing \\defskip.")
        return

    val = get_value(document.header, "\\defskip", j)

    skipval = "\\medskipamount"
    if val == "smallskip" or val == "medskip" or val == "bigskip":
        skipval = "\\" + val + "amount"
    else:
        # custom length
        skipval = val

    add_to_preamble(
        document,
        ["\\setlength{\\parskip}{" + skipval + "}", "\\setlength{\\parindent}{0pt}"],
    )

    document.header[i] = "\\paragraph_separation indent"
    document.header[j] = "\\paragraph_indentation default"
def revert_parskip(document):
    """Revert new parskip settings to preamble code (parskip package)."""
    i = find_token(document.header, "\\paragraph_separation skip", 0)
    if i == -1:
        return

    j = find_token(document.header, "\\defskip", 0)
    if j == -1:
        document.warning("Malformed LyX document! Missing \\defskip.")
        return

    val = get_value(document.header, "\\defskip", j)

    # halfline is the parskip package default and needs no option.
    skipval = ""
    if val == "smallskip" or val == "medskip" or val == "bigskip":
        skipval = "[skip=\\" + val + "amount]"
    elif val == "fullline":
        skipval = "[skip=\\baselineskip]"
    elif val != "halfline":
        skipval = "[skip={" + val + "}]"

    add_to_preamble(document, ["\\usepackage" + skipval + "{parskip}"])

    document.header[i] = "\\paragraph_separation indent"
    document.header[j] = "\\paragraph_indentation default"
def revert_line_vspaces(document):
    """Revert fullline and halfline vspaces to TeX."""
    insets = {
        "fullline*": "\\vspace*{\\baselineskip}",
        "fullline": "\\vspace{\\baselineskip}",
        "halfline*": "\\vspace*{0.5\\baselineskip}",
        "halfline": "\\vspace{0.5\\baselineskip}",
    }
    for inset in insets.keys():
        i = 0
        while True:
            i = find_token(document.body, "\\begin_inset VSpace " + inset, i)
            if i == -1:
                break
            end = find_end_of_inset(document.body, i)
            subst = put_cmd_in_ert(insets[inset])
            document.body[i : end + 1] = subst
def convert_libertinus_rm_fonts(document):
    """Convert Libertinus serif LaTeX font definition to the native header.

    Only applies when TeX fonts are in use.
    """
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        fm = createFontMapping(["Libertinus"])
        convert_fonts(document, fm)
def revert_libertinus_rm_fonts(document):
    """Revert Libertinus serif font definition to LaTeX preamble code."""
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # fontmap collects package/option pairs for add_preamble_fonts
        # (the initializer line was elided in the extracted listing).
        fontmap = dict()
        fm = createFontMapping(["libertinus"])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def revert_libertinus_sftt_fonts(document):
    """Revert Libertinus sans and typewriter font definitions to LaTeX.

    Emits \\renewcommand for \\sfdefault/\\ttdefault and, for non-default
    scale factors, the package scale macros.
    """
    # NOTE(review): several control-flow lines were elided in the extracted
    # listing; reconstructed. Verify against upstream lyx_2_4.py.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # first the sf font
        i = find_token(document.header, '\\font_sans "LibertinusSans-LF"', 0)
        if i != -1:
            j = find_token(document.header, "\\font_sans_osf true", 0)
            if j != -1:
                add_to_preamble(document, ["\\renewcommand{\\sfdefault}{LibertinusSans-OsF}"])
                document.header[j] = "\\font_sans_osf false"
            else:
                add_to_preamble(document, ["\\renewcommand{\\sfdefault}{LibertinusSans-LF}"])
            document.header[i] = document.header[i].replace("LibertinusSans-LF", "default")
            sf_scale = 100.0
            sfval = find_token(document.header, "\\font_sf_scale", 0)
            if sfval == -1:
                document.warning("Malformed LyX document: Missing \\font_sf_scale.")
            else:
                sfscale = document.header[sfval].split()
                val = sfscale[1]
                sfscale[1] = "100"
                document.header[sfval] = " ".join(sfscale)
                try:
                    # float() can throw on a malformed value
                    sf_scale = float(val)
                except ValueError:
                    document.warning("Invalid font_sf_scale value: " + val)
                # BUGFIX: the listing compared the float against the string
                # "100.0" (always unequal); compare numerically.
                if sf_scale != 100.0:
                    add_to_preamble(
                        document,
                        [
                            "\\renewcommand*{\\LibertinusSans@scale}{"
                            + str(sf_scale / 100.0)
                            + "}"
                        ],
                    )
        # now the tt font
        i = find_token(document.header, '\\font_typewriter "LibertinusMono-TLF"', 0)
        if i != -1:
            add_to_preamble(document, ["\\renewcommand{\\ttdefault}{LibertinusMono-TLF}"])
            document.header[i] = document.header[i].replace("LibertinusMono-TLF", "default")
            tt_scale = 100.0
            ttval = find_token(document.header, "\\font_tt_scale", 0)
            if ttval == -1:
                document.warning("Malformed LyX document: Missing \\font_tt_scale.")
            else:
                ttscale = document.header[ttval].split()
                val = ttscale[1]
                ttscale[1] = "100"
                document.header[ttval] = " ".join(ttscale)
                try:
                    tt_scale = float(val)
                except ValueError:
                    document.warning("Invalid font_tt_scale value: " + val)
                # BUGFIX: same float-vs-string comparison as above.
                if tt_scale != 100.0:
                    add_to_preamble(
                        document,
                        [
                            "\\renewcommand*{\\LibertinusMono@scale}{"
                            + str(tt_scale / 100.0)
                            + "}"
                        ],
                    )
def revert_docbook_table_output(document):
    """Remove the \\docbook_table_output buffer param (unknown to LyX 2.3)."""
    i = find_token(document.header, "\\docbook_table_output")
    if i != -1:
        del document.header[i]
def revert_nopagebreak(document):
    """Revert Newpage nopagebreak insets to \\nopagebreak{} ERT."""
    while True:
        i = find_token(document.body, "\\begin_inset Newpage nopagebreak")
        if i == -1:
            return
        end = find_end_of_inset(document.body, i)
        if end == -1:
            document.warning("Malformed LyX document: Could not find end of Newpage inset.")
            continue
        subst = put_cmd_in_ert("\\nopagebreak{}")
        document.body[i : end + 1] = subst
def revert_hrquotes(document):
    """Revert Hungarian quotation marks to the closest 2.3 equivalents."""
    i = find_token(document.header, "\\quotes_style hungarian", 0)
    if i != -1:
        document.header[i] = "\\quotes_style polish"

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Quotes h")
        if i == -1:
            return
        if document.body[i] == "\\begin_inset Quotes hld":
            document.body[i] = "\\begin_inset Quotes pld"
        elif document.body[i] == "\\begin_inset Quotes hrd":
            document.body[i] = "\\begin_inset Quotes prd"
        elif document.body[i] == "\\begin_inset Quotes hls":
            document.body[i] = "\\begin_inset Quotes ald"
        elif document.body[i] == "\\begin_inset Quotes hrs":
            document.body[i] = "\\begin_inset Quotes ard"
        # Always advance so an unmatched "Quotes h*" inset cannot make the
        # scan loop forever.
        i += 1
def convert_math_refs(document):
    """Rename \\prettyref to \\formatted inside math (Formula) insets."""
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Formula", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of inset at line %d of body!" % i)
            i += 1
            continue
        while i < j:
            document.body[i] = document.body[i].replace("\\prettyref", "\\formatted")
            i += 1
def revert_math_refs(document):
    """Rename \\formatted back to \\prettyref and strip \\labelonly in math."""
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Formula", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of inset at line %d of body!" % i)
            i += 1
            continue
        while i < j:
            document.body[i] = document.body[i].replace("\\formatted", "\\prettyref")
            if "\\labelonly" in document.body[i]:
                # \labelonly{key} has no 2.3 equivalent; keep just the key.
                document.body[i] = re.sub("\\\\labelonly{([^}]+?)}", "\\1", document.body[i])
            i += 1
def convert_branch_colors(document):
    """Convert branch colors to semantic values."""
    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document. Can't find end of branch definition!")
            break
        # We only support the standard LyX background for now
        k = find_token(document.header, "\\color #faf0e6", i, j)
        if k != -1:
            document.header[k] = "\\color background"
        i += 1
def revert_branch_colors(document):
    """Revert semantic branch colors."""
    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document. Can't find end of branch definition!")
            break
        k = find_token(document.header, "\\color", i, j)
        if k != -1:
            bcolor = get_value(document.header, "\\color", k)
            # NOTE(review): indexing char [1] (not [0]) matches the visible
            # original — confirm the intended hex-color test upstream.
            if bcolor[1] != "#":
                # this will be read as background by LyX 2.3
                document.header[k] = "\\color none"
        i += 1
def revert_darkmode_graphics(document):
    """Revert the darkModeSensitive InsetGraphics param."""
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Graphics", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of graphics inset at line %d!!" % (i))
            i += 1
            continue
        k = find_token(document.body, "\tdarkModeSensitive", i, j)
        if k != -1:
            del document.body[k]
        i += 1
def revert_branch_darkcols(document):
    """Revert dark branch colors (drop the second, dark-mode color value)."""
    i = 0
    while True:
        i = find_token(document.header, "\\branch", i)
        if i == -1:
            break
        j = find_token(document.header, "\\end_branch", i)
        if j == -1:
            document.warning("Malformed LyX document. Can't find end of branch definition!")
            break
        k = find_token(document.header, "\\color", i, j)
        if k != -1:
            # "\color <light> <dark>" -> keep only the light value.
            m = re.search("\\\\color (\\S+) (\\S+)", document.header[k])
            if m:
                document.header[k] = "\\color " + m.group(1)
        i += 1
# NOTE(review): excerpt with original line numbers embedded and a large
# number of control-flow and literal lines elided (see numbering gaps).
# The cell/column bookkeeping is far too order-sensitive to reconstruct
# safely; left byte-identical. Recover from upstream lyx2lyx/lyx_2_4.py.
4997 def revert_vcolumns2(document):
4998 """Revert varwidth columns with line breaks etc."""
5000 needvarwidth = False
5002 needcellvarwidth = False
# Scan all tabular insets (loop header elided).
5005 i = find_token(document.body, "\\begin_inset Tabular", i + 1)
5008 j = find_end_of_inset(document.body, i)
5010 document.warning("Malformed LyX document: Could not find end of tabular.")
5013 # Collect necessary column information
5015 nrows = int(document.body[i + 1].split('"')[3])
5016 ncols = int(document.body[i + 1].split('"')[5])
5018 for k in range(ncols):
5019 m = find_token(document.body, "<column", m)
5020 width = get_option_value(document.body[m], "width")
5021 varwidth = get_option_value(document.body[m], "varwidth")
5022 alignment = get_option_value(document.body[m], "alignment")
5023 valignment = get_option_value(document.body[m], "valignment")
5024 special = get_option_value(document.body[m], "special")
5025 col_info.append([width, varwidth, alignment, valignment, special, m])
# Walk every cell and decide whether it needs varwidth treatment.
5030 for row in range(nrows):
5031 for col in range(ncols):
5032 m = find_token(document.body, "<cell", m)
5033 multicolumn = get_option_value(document.body[m], "multicolumn") != ""
5034 multirow = get_option_value(document.body[m], "multirow") != ""
5035 fixedwidth = get_option_value(document.body[m], "width") != ""
5036 rotate = get_option_value(document.body[m], "rotate")
5037 cellalign = get_option_value(document.body[m], "alignment")
5038 cellvalign = get_option_value(document.body[m], "valignment")
5039 # Check for: linebreaks, multipars, non-standard environments
5041 endcell = find_token(document.body, "</cell>", begcell)
5044 find_token(document.body, "\\begin_inset Newline", begcell, endcell)
5047 vcand = not fixedwidth
5048 elif count_pars_in_inset(document.body, begcell + 2) > 1:
5049 vcand = not fixedwidth
5050 elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
5051 vcand = not fixedwidth
5052 colalignment = col_info[col][2]
5053 colvalignment = col_info[col][3]
5055 if rotate == "" and (
5056 (colalignment == "left" and colvalignment == "top")
5059 and cellalign == "left"
5060 and cellvalign == "top"
# Plain left/top cells can use a varwidth V{} column special.
5064 col_info[col][0] == ""
5065 and col_info[col][1] == ""
5066 and col_info[col][4] == ""
5069 col_line = col_info[col][5]
5071 vval = "V{\\linewidth}"
5073 document.body[m] = (
5074 document.body[m][:-1] + ' special="' + vval + '">'
5077 document.body[col_line] = (
5078 document.body[col_line][:-1]
# Otherwise wrap the cell content in a cellvarwidth environment.
5085 if multicolumn or multirow:
5086 if cellvalign == "middle":
5088 elif cellvalign == "bottom":
5091 if colvalignment == "middle":
5093 elif colvalignment == "bottom":
5095 flt = find_token(document.body, "\\begin_layout", begcell, endcell)
5096 elt = find_token_backwards(document.body, "\\end_layout", endcell)
5097 if flt != -1 and elt != -1:
5099 # we need to reset character layouts if necessary
5100 el = find_token(document.body, "\\emph on", flt, elt)
5102 extralines.append("\\emph default")
5103 el = find_token(document.body, "\\noun on", flt, elt)
5105 extralines.append("\\noun default")
5106 el = find_token(document.body, "\\series", flt, elt)
5108 extralines.append("\\series default")
5109 el = find_token(document.body, "\\family", flt, elt)
5111 extralines.append("\\family default")
5112 el = find_token(document.body, "\\shape", flt, elt)
5114 extralines.append("\\shape default")
5115 el = find_token(document.body, "\\color", flt, elt)
5117 extralines.append("\\color inherit")
5118 el = find_token(document.body, "\\size", flt, elt)
5120 extralines.append("\\size default")
5121 el = find_token(document.body, "\\bar under", flt, elt)
5123 extralines.append("\\bar default")
5124 el = find_token(document.body, "\\uuline on", flt, elt)
5126 extralines.append("\\uuline default")
5127 el = find_token(document.body, "\\uwave on", flt, elt)
5129 extralines.append("\\uwave default")
5130 el = find_token(document.body, "\\strikeout on", flt, elt)
5132 extralines.append("\\strikeout default")
5133 document.body[elt : elt + 1] = (
5135 + put_cmd_in_ert("\\end{cellvarwidth}")
# Insert \begin{cellvarwidth} after any leading \lang change.
5139 for q in range(flt, elt):
5140 if document.body[q] != "" and document.body[q][0] != "\\":
5142 if document.body[q][:5] == "\\lang":
5146 document.body[parlang + 1 : parlang + 1] = put_cmd_in_ert(
5147 "\\begin{cellvarwidth}" + alarg
5150 document.body[flt + 1 : flt + 1] = put_cmd_in_ert(
5151 "\\begin{cellvarwidth}" + alarg
5153 needcellvarwidth = True
5155 # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
5156 # with newlines, and we do not want that)
5158 endcell = find_token(document.body, "</cell>", begcell)
5162 "\\begin_inset Newline newline",
5169 "\\begin_inset Newline linebreak",
5176 nle = find_end_of_inset(document.body, nl)
5177 del document.body[nle : nle + 1]
5179 document.body[nl : nl + 1] = put_cmd_in_ert("\\linebreak{}")
5181 document.body[nl : nl + 1] = put_cmd_in_ert("\\\\")
5182 # Replace parbreaks in multirow with \\endgraf
5183 if multirow == True:
5184 flt = find_token(document.body, "\\begin_layout", begcell, endcell)
5187 elt = find_end_of_layout(document.body, flt)
5190 "Malformed LyX document! Missing layout end."
5193 endcell = find_token(document.body, "</cell>", begcell)
5195 document.body, "\\begin_layout", elt, endcell
5199 document.body[elt : flt + 1] = put_cmd_in_ert("\\endgraf{}")
# Preamble support for the environments used above.
5205 if needarray == True:
5206 add_to_preamble(document, ["\\usepackage{array}"])
5207 if needcellvarwidth == True:
5211 "%% Variable width box for table cells",
5212 "\\newenvironment{cellvarwidth}[1][t]",
5213 " {\\begin{varwidth}[#1]{\\linewidth}}",
5214 " {\\@finalstrut\\@arstrutbox\\end{varwidth}}",
5217 if needvarwidth == True:
5218 add_to_preamble(document, ["\\usepackage{varwidth}"])
5221 def convert_vcolumns2(document):
5222 """Convert varwidth ERT to native"""
5226 i = find_token(document.body, "\\begin_inset Tabular", i + 1)
5229 j = find_end_of_inset(document.body, i)
5231 document.warning("Malformed LyX document: Could not find end of tabular.")
5235 nrows = int(document.body[i + 1].split('"')[3])
5236 ncols = int(document.body[i + 1].split('"')[5])
5239 for row in range(nrows):
5240 for col in range(ncols):
5241 m = find_token(document.body, "<cell", m)
5242 multirow = get_option_value(document.body[m], "multirow") != ""
5244 endcell = find_token(document.body, "</cell>", begcell)
5246 cvw = find_token(document.body, "begin{cellvarwidth}", begcell, endcell)
5249 document.body[cvw - 1] == "\\backslash"
5250 and get_containing_inset(document.body, cvw)[0] == "ERT"
5253 # Remove ERTs with cellvarwidth env
5254 ecvw = find_token(document.body, "end{cellvarwidth}", begcell, endcell)
5256 if document.body[ecvw - 1] == "\\backslash":
5257 eertins = get_containing_inset(document.body, ecvw)
5258 if eertins and eertins[0] == "ERT":
5259 del document.body[eertins[1] : eertins[2] + 1]
5261 cvw = find_token(document.body, "begin{cellvarwidth}", begcell, endcell)
5262 ertins = get_containing_inset(document.body, cvw)
5263 if ertins and ertins[0] == "ERT":
5264 del document.body[ertins[1] : ertins[2] + 1]
5266 # Convert ERT newlines (as cellvarwidth detection relies on that)
5268 endcell = find_token(document.body, "</cell>", begcell)
5269 nl = find_token(document.body, "\\backslash", begcell, endcell)
5270 if nl == -1 or document.body[nl + 2] != "\\backslash":
5272 ertins = get_containing_inset(document.body, nl)
5273 if ertins and ertins[0] == "ERT":
5274 document.body[ertins[1] : ertins[2] + 1] = [
5275 "\\begin_inset Newline newline",
5280 # Same for linebreaks
5282 endcell = find_token(document.body, "</cell>", begcell)
5283 nl = find_token(document.body, "linebreak", begcell, endcell)
5284 if nl == -1 or document.body[nl - 1] != "\\backslash":
5286 ertins = get_containing_inset(document.body, nl)
5287 if ertins and ertins[0] == "ERT":
5288 document.body[ertins[1] : ertins[2] + 1] = [
5289 "\\begin_inset Newline linebreak",
5295 if multirow == True:
5296 endcell = find_token(document.body, "</cell>", begcell)
5297 nl = find_token(document.body, "endgraf{}", begcell, endcell)
5298 if nl == -1 or document.body[nl - 1] != "\\backslash":
5300 ertins = get_containing_inset(document.body, nl)
5301 if ertins and ertins[0] == "ERT":
5302 document.body[ertins[1] : ertins[2] + 1] = [
5305 "\\begin_layout Plain Layout",
5315 "% Added by lyx2lyx",
5316 "%% Variable width box for table cells",
5317 r"\newenvironment{cellvarwidth}[1][t]",
5318 r" {\begin{varwidth}[#1]{\linewidth}}",
5319 r" {\@finalstrut\@arstrutbox\end{varwidth}}",
5322 del_complete_lines(document.preamble, ["% Added by lyx2lyx", r"\usepackage{varwidth}"])
5325 frontispiece_def = [
5326 r"### Inserted by lyx2lyx (frontispiece layout) ###",
5327 r"Style Frontispiece",
5328 r" CopyStyle Titlehead",
5329 r" LatexName frontispiece",
5334 def convert_koma_frontispiece(document):
5335 """Remove local KOMA frontispiece definition"""
5336 if document.textclass[:3] != "scr":
5339 if document.del_local_layout(frontispiece_def):
5340 document.add_module("ruby")
5343 def revert_koma_frontispiece(document):
5344 """Add local KOMA frontispiece definition"""
5345 if document.textclass[:3] != "scr":
5348 if find_token(document.body, "\\begin_layout Frontispiece", 0) != -1:
5349 document.append_local_layout(frontispiece_def)
5352 def revert_spellchecker_ignore(document):
5353 """Revert document spellchecker dictionary"""
5355 i = find_token(document.header, "\\spellchecker_ignore")
5358 del document.header[i]
5361 def revert_docbook_mathml_prefix(document):
5362 """Revert the DocBook parameter to choose the prefix for the MathML name space"""
5364 i = find_token(document.header, "\\docbook_mathml_prefix")
5367 del document.header[i]
5370 def revert_document_metadata(document):
5371 """Revert document metadata"""
5374 i = find_token(document.header, "\\begin_metadata", i)
5377 j = find_end_of(document.header, i, "\\begin_metadata", "\\end_metadata")
5379 # this should not happen
5381 document.header[i : j + 1] = []
5384 def revert_index_macros(document):
5385 "Revert inset index macros"
5389 # trailing blank needed here to exclude IndexMacro insets
5390 i = find_token(document.body, "\\begin_inset Index ", i + 1)
5393 j = find_end_of_inset(document.body, i)
5396 "Malformed LyX document: Can't find end of index inset at line %d" % i
5399 pl = find_token(document.body, "\\begin_layout Plain Layout", i, j)
5402 "Malformed LyX document: Can't find plain layout in index inset at line %d" % i
5405 # find, store and remove inset params
5406 pr = find_token(document.body, "range", i, pl)
5407 prval = get_quoted_value(document.body, "range", pr)
5409 if prval == "start":
5411 elif prval == "end":
5413 pf = find_token(document.body, "pageformat", i, pl)
5414 pageformat = get_quoted_value(document.body, "pageformat", pf)
5415 del document.body[pr : pf + 1]
5416 # Now re-find (potentially moved) inset end again, and search for subinsets
5417 j = find_end_of_inset(document.body, i)
5420 "Malformed LyX document: Can't find end of index inset at line %d" % i
5423 # We search for all possible subentries in turn, store their
5424 # content and delete them
5430 # Two subentries are allowed, thus the duplication
5431 imacros = ["seealso", "see", "subentry", "subentry", "sortkey"]
5432 for imacro in imacros:
5433 iim = find_token(document.body, "\\begin_inset IndexMacro %s" % imacro, i, j)
5436 iime = find_end_of_inset(document.body, iim)
5439 "Malformed LyX document: Can't find end of index macro inset at line %d" % i
5442 iimpl = find_token(document.body, "\\begin_layout Plain Layout", iim, iime)
5445 "Malformed LyX document: Can't find plain layout in index macro inset at line %d"
5449 iimple = find_end_of_layout(document.body, iimpl)
5452 "Malformed LyX document: Can't find end of index macro inset plain layout at line %d"
5456 icont = document.body[iimpl:iimple]
5457 if imacro == "seealso":
5459 elif imacro == "see":
5461 elif imacro == "subentry":
5462 # subentries might hace their own sortkey!
5464 document.body, "\\begin_inset IndexMacro sortkey", iimpl, iimple
5467 xiime = find_end_of_inset(document.body, xiim)
5470 "Malformed LyX document: Can't find end of index macro inset at line %d"
5474 xiimpl = find_token(
5475 document.body, "\\begin_layout Plain Layout", xiim, xiime
5479 "Malformed LyX document: Can't find plain layout in index macro inset at line %d"
5483 xiimple = find_end_of_layout(document.body, xiimpl)
5486 "Malformed LyX document: Can't find end of index macro inset plain layout at line %d"
5491 xicont = document.body[xiimpl + 1 : xiimple]
5492 # everything before ................... or after
5494 document.body[iimpl + 1 : xiim]
5495 + document.body[xiime + 1 : iimple]
5497 # construct the latex sequence
5498 icont = xicont + put_cmd_in_ert("@") + xxicont[1:]
5499 if len(subentry) > 0:
5500 if icont[0] == "\\begin_layout Plain Layout":
5501 subentry2 = icont[1:]
5505 if icont[0] == "\\begin_layout Plain Layout":
5506 subentry = icont[1:]
5509 elif imacro == "sortkey":
5511 # Everything stored. Delete subinset.
5512 del document.body[iim : iime + 1]
5513 # Again re-find (potentially moved) index inset end
5514 j = find_end_of_inset(document.body, i)
5517 "Malformed LyX document: Can't find end of index inset at line %d" % i
5520 # Now insert all stuff, starting from the inset end
5521 pl = find_token(document.body, "\\begin_layout Plain Layout", i, j)
5524 "Malformed LyX document: Can't find plain layout in index inset at line %d" % i
5527 ple = find_end_of_layout(document.body, pl)
5530 "Malformed LyX document: Can't find end of index macro inset plain layout at line %d"
5535 document.body[ple:ple] = (
5536 put_cmd_in_ert("|" + pagerange + "see{") + see + put_cmd_in_ert("}")
5538 elif len(seealso) > 0:
5539 document.body[ple:ple] = (
5540 put_cmd_in_ert("|" + pagerange + "seealso{") + seealso + put_cmd_in_ert("}")
5542 elif pageformat != "default":
5543 document.body[ple:ple] = put_cmd_in_ert("|" + pagerange + pageformat)
5544 if len(subentry2) > 0:
5545 document.body[ple:ple] = put_cmd_in_ert("!") + subentry2
5546 if len(subentry) > 0:
5547 document.body[ple:ple] = put_cmd_in_ert("!") + subentry
5548 if len(sortkey) > 0:
5549 document.body[pl : pl + 1] = document.body[pl:pl] + sortkey + put_cmd_in_ert("@")
5552 def revert_starred_refs(document):
5553 "Revert starred refs"
5554 i = find_token(document.header, "\\use_hyperref true", 0)
5555 use_hyperref = i != -1
5563 i = find_token(document.body, "\\begin_inset CommandInset ref", i)
5567 end = find_end_of_inset(document.body, i)
5570 "Malformed LyX document: Can't find end of inset at line %d" % i
5574 # If we are not using hyperref, then we just need to delete the line
5575 if not use_hyperref:
5576 k = find_token(document.body, "nolink", i, end)
5580 del document.body[k]
5583 # If we are using hyperref, then we'll need to do more.
5587 # so we are in an InsetRef
5590 # If nolink is False, just remove that line
5591 if nolink == False or cmd == "formatted" or cmd == "labelonly":
5592 # document.warning("Skipping " + cmd + " " + ref)
5593 if nolinkline != -1:
5594 del document.body[nolinkline]
5597 # We need to construct a new command and put it in ERT
5598 newcmd = "\\" + cmd + "*{" + ref + "}"
5599 # document.warning(newcmd)
5600 newlines = put_cmd_in_ert(newcmd)
5601 document.body[start : end + 1] = newlines
5602 i += len(newlines) - (end - start) + 1
5608 l = document.body[i]
5609 if l.startswith("LatexCommand"):
5611 elif l.startswith("reference"):
5613 elif l.startswith("nolink"):
5615 nolink = tmp == "true"
5620 def convert_starred_refs(document):
5621 "Convert starred refs"
5624 i = find_token(document.body, "\\begin_inset CommandInset ref", i)
5627 end = find_end_of_inset(document.body, i)
5629 document.warning("Malformed LyX document: Can't find end of inset at line %d" % i)
5633 document.body.insert(newlineat, 'nolink "false"')
5637 def revert_familydefault(document):
5638 "Revert \\font_default_family for non-TeX fonts"
5640 if find_token(document.header, "\\use_non_tex_fonts true", 0) == -1:
5643 i = find_token(document.header, "\\font_default_family", 0)
5645 document.warning("Malformed LyX document: Can't find \\font_default_family header")
5648 dfamily = get_value(document.header, "\\font_default_family", i)
5649 if dfamily == "default":
5652 document.header[i] = "\\font_default_family default"
5653 add_to_preamble(document, ["\\renewcommand{\\familydefault}{\\" + dfamily + "}"])
5656 def convert_hyper_other(document):
5657 'Classify "run:" links as other'
5661 i = find_token(document.body, "\\begin_inset CommandInset href", i)
5664 j = find_end_of_inset(document.body, i)
5666 document.warning("Cannot find end of inset at line " << str(i))
5669 k = find_token(document.body, 'type "', i, j)
5671 # not a "Web" type. Continue.
5674 t = find_token(document.body, "target", i, j)
5676 document.warning("Malformed hyperlink inset at line " + str(i))
5679 if document.body[t][8:12] == "run:":
5680 document.body.insert(t, 'type "other"')
5684 def revert_hyper_other(document):
5685 'Revert other link type to ERT and "run:" to Web'
5689 i = find_token(document.body, "\\begin_inset CommandInset href", i)
5692 j = find_end_of_inset(document.body, i)
5694 document.warning("Cannot find end of inset at line " << str(i))
5697 k = find_token(document.body, 'type "other"', i, j)
5702 n = find_token(document.body, "name", i, j)
5703 t = find_token(document.body, "target", i, j)
5704 if n == -1 or t == -1:
5705 document.warning("Malformed hyperlink inset at line " + str(i))
5708 name = document.body[n][6:-1]
5709 target = document.body[t][8:-1]
5710 if target[:4] == "run:":
5711 del document.body[k]
5713 cmd = r"\href{" + target + "}{" + name + "}"
5714 ecmd = put_cmd_in_ert(cmd)
5715 document.body[i : j + 1] = ecmd
5720 "aa": "Acknowledgments",
5721 "aapaper": "Acknowledgments",
5722 "aastex": "Acknowledgments",
5723 "aastex62": "Acknowledgments",
5724 "achemso": "Acknowledgments",
5725 "acmart": "Acknowledgments",
5726 "AEA": "Acknowledgments",
5727 "apa": "Acknowledgments",
5728 "copernicus": "Acknowledgments",
5729 "egs": "Acknowledgments", # + Acknowledgment
5730 "elsart": "Acknowledgment",
5731 "isprs": "Acknowledgments",
5732 "iucr": "Acknowledgments",
5733 "kluwer": "Acknowledgments",
5734 "svglobal3": "Acknowledgments",
5735 "svglobal": "Acknowledgment",
5736 "svjog": "Acknowledgment",
5737 "svmono": "Acknowledgment",
5738 "svmult": "Acknowledgment",
5739 "svprobth": "Acknowledgment",
5743 "aa": "Acknowledgement",
5744 "aapaper": "Acknowledgement",
5745 "aastex": "Acknowledgement",
5746 "aastex62": "Acknowledgement",
5747 "achemso": "Acknowledgement",
5748 "acmart": "Acknowledgements",
5749 "AEA": "Acknowledgement",
5750 "apa": "Acknowledgements",
5751 "copernicus": "Acknowledgements",
5752 "egs": "Acknowledgements", # + Acknowledgement
5753 "elsart": "Acknowledegment",
5754 "isprs": "Acknowledgements",
5755 "iucr": "Acknowledgements",
5756 "kluwer": "Acknowledgements",
5757 "svglobal3": "Acknowledgements",
5758 "svglobal": "Acknowledgement",
5759 "svjog": "Acknowledgement",
5760 "svmono": "Acknowledgement",
5761 "svmult": "Acknowledgement",
5762 "svprobth": "Acknowledgement",
5766 def convert_acknowledgment(document):
5767 "Fix spelling of acknowledgment styles"
5769 if document.textclass not in list(ack_layouts_old.keys()):
5775 document.body, "\\begin_layout " + ack_layouts_old[document.textclass], i
5779 document.body[i] = "\\begin_layout " + ack_layouts_new[document.textclass]
5780 if document.textclass != "egs":
5782 # egs has two styles
5785 i = find_token(document.body, "\\begin_layout Acknowledgement", i)
5788 document.body[i] = "\\begin_layout Acknowledgment"
5791 def revert_acknowledgment(document):
5792 "Restore old spelling of acknowledgment styles"
5794 if document.textclass not in list(ack_layouts_new.keys()):
5799 document.body, "\\begin_layout " + ack_layouts_new[document.textclass], i
5803 document.body[i] = "\\begin_layout " + ack_layouts_old[document.textclass]
5804 if document.textclass != "egs":
5806 # egs has two styles
5809 i = find_token(document.body, "\\begin_layout Acknowledgment", i)
5812 document.body[i] = "\\begin_layout Acknowledgement"
5816 r"### Inserted by lyx2lyx (ams extended theorems) ###",
5817 r"### This requires theorems-ams-extended module to be loaded",
5818 r"Style Acknowledgement",
5819 r" CopyStyle Remark",
5820 r" LatexName acknowledgement",
5821 r' LabelString "Acknowledgement \thetheorem."',
5823 r" \theoremstyle{remark}",
5824 r" \newtheorem{acknowledgement}[thm]{\protect\acknowledgementname}",
5827 r" \providecommand{\acknowledgementname}{_(Acknowledgement)}",
5828 r" EndLangPreamble",
5830 r" \addto\captions$$lang{\renewcommand{\acknowledgementname}{_(Acknowledgement)}}",
5831 r" EndBabelPreamble",
5832 r" DocBookTag para",
5833 r' DocBookAttr role="acknowledgement"',
5834 r' DocBookItemTag ""',
5838 ackStar_theorem_def = [
5839 r"### Inserted by lyx2lyx (ams extended theorems) ###",
5840 r"### This requires a theorems-ams-extended-* module to be loaded",
5841 r"Style Acknowledgement*",
5842 r" CopyStyle Remark*",
5843 r" LatexName acknowledgement*",
5844 r' LabelString "Acknowledgement."',
5846 r" \theoremstyle{remark}",
5847 r" \newtheorem*{acknowledgement*}{\protect\acknowledgementname}",
5850 r" \providecommand{\acknowledgementname}{_(Acknowledgement)}",
5851 r" EndLangPreamble",
5853 r" \addto\captions$$lang{\renewcommand{\acknowledgementname}{_(Acknowledgement)}}",
5854 r" EndBabelPreamble",
5855 r" DocBookTag para",
5856 r' DocBookAttr role="acknowledgement"',
5857 r' DocBookItemTag ""',
5861 ack_bytype_theorem_def = [
5862 r"### Inserted by lyx2lyx (ams extended theorems) ###",
5863 r"### This requires theorems-ams-extended-bytype module to be loaded",
5864 r"Counter acknowledgement",
5865 r" GuiName Acknowledgment",
5867 r"Style Acknowledgement",
5868 r" CopyStyle Remark",
5869 r" LatexName acknowledgement",
5870 r' LabelString "Acknowledgement \theacknowledgement."',
5872 r" \theoremstyle{remark}",
5873 r" \newtheorem{acknowledgement}{\protect\acknowledgementname}",
5876 r" \providecommand{\acknowledgementname}{_(Acknowledgement)}",
5877 r" EndLangPreamble",
5879 r" \addto\captions$$lang{\renewcommand{\acknowledgementname}{_(Acknowledgement)}}",
5880 r" EndBabelPreamble",
5881 r" DocBookTag para",
5882 r' DocBookAttr role="acknowledgement"',
5883 r' DocBookItemTag ""',
5887 ack_chap_bytype_theorem_def = [
5888 r"### Inserted by lyx2lyx (ams extended theorems) ###",
5889 r"### This requires theorems-ams-extended-chap-bytype module to be loaded",
5890 r"Counter acknowledgement",
5891 r" GuiName Acknowledgment",
5894 r"Style Acknowledgement",
5895 r" CopyStyle Remark",
5896 r" LatexName acknowledgement",
5897 r' LabelString "Acknowledgement \theacknowledgement."',
5899 r" \theoremstyle{remark}",
5900 r" \ifx\thechapter\undefined",
5901 r" \newtheorem{acknowledgement}{\protect\acknowledgementname}",
5903 r" \newtheorem{acknowledgement}{\protect\acknowledgementname}[chapter]",
5907 r" \providecommand{\acknowledgementname}{_(Acknowledgement)}",
5908 r" EndLangPreamble",
5910 r" \addto\captions$$lang{\renewcommand{\acknowledgementname}{_(Acknowledgement)}}",
5911 r" EndBabelPreamble",
5912 r" DocBookTag para",
5913 r' DocBookAttr role="acknowledgement"',
5914 r' DocBookItemTag ""',
5919 def convert_ack_theorems(document):
5920 """Put removed acknowledgement theorems to local layout"""
5924 if "theorems-ams-extended-bytype" in document.get_module_list():
5927 if haveAck and haveStarAck:
5929 i = find_token(document.body, "\\begin_layout Acknowledgement", i)
5932 if document.body[i] == "\\begin_layout Acknowledgement*" and not haveStarAck:
5933 document.append_local_layout(ackStar_theorem_def)
5936 document.append_local_layout(ack_bytype_theorem_def)
5939 elif "theorems-ams-extended-chap-bytype" in document.get_module_list():
5942 if haveAck and haveStarAck:
5944 i = find_token(document.body, "\\begin_layout Acknowledgement", i)
5947 if document.body[i] == "\\begin_layout Acknowledgement*" and not haveStarAck:
5948 document.append_local_layout(ackStar_theorem_def)
5951 document.append_local_layout(ack_chap_bytype_theorem_def)
5954 elif "theorems-ams-extended" in document.get_module_list():
5957 if haveAck and haveStarAck:
5959 i = find_token(document.body, "\\begin_layout Acknowledgement", i)
5962 if document.body[i] == "\\begin_layout Acknowledgement*" and not haveStarAck:
5963 document.append_local_layout(ackStar_theorem_def)
5966 document.append_local_layout(ack_theorem_def)
def revert_ack_theorems(document):
    """Remove acknowledgement theorems from local layout"""
    # Each theorems-ams-extended variant pairs the starred definition with
    # its own numbered definition; only the first matching module applies.
    module_layouts = (
        ("theorems-ams-extended-bytype", ack_bytype_theorem_def),
        ("theorems-ams-extended-chap-bytype", ack_chap_bytype_theorem_def),
        ("theorems-ams-extended", ack_theorem_def),
    )
    for module, numbered_def in module_layouts:
        if module in document.get_module_list():
            document.del_local_layout(ackStar_theorem_def)
            document.del_local_layout(numbered_def)
            break
5984 def revert_empty_macro(document):
5985 """Remove macros with empty LaTeX part"""
5988 i = find_token(document.body, "\\begin_inset FormulaMacro", i)
5991 cmd = document.body[i + 1]
5992 if cmd[-3:] != "}{}" and cmd[-3:] != "]{}":
5995 j = find_end_of_inset(document.body, i)
5996 document.body[i : j + 1] = []
5999 def convert_empty_macro(document):
6000 """In the unlikely event someone defined a macro with empty LaTeX, add {}"""
6003 i = find_token(document.body, "\\begin_inset FormulaMacro", i)
6006 cmd = document.body[i + 1]
6007 if cmd[-3:] != "}{}" and cmd[-3:] != "]{}":
6010 newstr = cmd[:-2] + "{\\{\\}}"
6011 document.body[i + 1] = newstr
6015 def convert_cov_options(document):
6016 """Update examples item argument structure"""
6018 if "linguistics" not in document.get_module_list():
6021 layouts = ["Numbered Examples (consecutive)", "Subexample"]
6023 for layout in layouts:
6026 i = find_token(document.body, "\\begin_layout %s" % layout, i)
6029 j = find_end_of_layout(document.body, i)
6032 "Malformed LyX document: Can't find end of example layout at line %d" % i
6036 k = find_token(document.body, "\\begin_inset Argument item:1", i, j)
6038 document.body[k] = "\\begin_inset Argument item:2"
6040 # Shift gloss arguments
6043 i = find_token(document.body, "\\begin_inset Flex Interlinear Gloss (2 Lines)", i)
6046 j = find_end_of_inset(document.body, i)
6049 "Malformed LyX document: Can't find end of gloss inset at line %d" % i
6053 k = find_token(document.body, "\\begin_inset Argument post:2", i, j)
6055 document.body[k] = "\\begin_inset Argument post:4"
6056 k = find_token(document.body, "\\begin_inset Argument post:1", i, j)
6058 document.body[k] = "\\begin_inset Argument post:2"
6063 i = find_token(document.body, "\\begin_inset Flex Interlinear Gloss (3 Lines)", i)
6066 j = find_end_of_inset(document.body, i)
6069 "Malformed LyX document: Can't find end of gloss inset at line %d" % i
6073 k = find_token(document.body, "\\begin_inset Argument post:3", i, j)
6075 document.body[k] = "\\begin_inset Argument post:6"
6076 k = find_token(document.body, "\\begin_inset Argument post:2", i, j)
6078 document.body[k] = "\\begin_inset Argument post:4"
6079 k = find_token(document.body, "\\begin_inset Argument post:1", i, j)
6081 document.body[k] = "\\begin_inset Argument post:2"
6085 def revert_linggloss2(document):
6086 "Revert gloss with new args to ERT"
6088 if not "linguistics" in document.get_module_list():
6093 "\\begin_inset Flex Interlinear Gloss (2 Lines)",
6094 "\\begin_inset Flex Interlinear Gloss (3 Lines)",
6096 for glosse in glosses:
6099 i = find_token(document.body, glosse, i + 1)
6102 j = find_end_of_inset(document.body, i)
6104 document.warning("Malformed LyX document: Can't find end of Gloss inset")
6107 # Check if we have new options
6108 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
6110 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
6112 arg = find_token(document.body, "\\begin_inset Argument post:5", i, j)
6117 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
6118 endarg = find_end_of_inset(document.body, arg)
6121 argbeginPlain = find_token(
6122 document.body, "\\begin_layout Plain Layout", arg, endarg
6124 if argbeginPlain == -1:
6125 document.warning("Malformed LyX document: Can't find optarg plain Layout")
6127 argendPlain = find_end_of_inset(document.body, argbeginPlain)
6128 optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
6130 # remove Arg insets and paragraph, if it only contains this inset
6132 document.body[arg - 1] == "\\begin_layout Plain Layout"
6133 and find_end_of_layout(document.body, arg - 1) == endarg + 3
6135 del document.body[arg - 1 : endarg + 4]
6137 del document.body[arg : endarg + 1]
6139 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
6140 endarg = find_end_of_inset(document.body, arg)
6143 argbeginPlain = find_token(
6144 document.body, "\\begin_layout Plain Layout", arg, endarg
6146 if argbeginPlain == -1:
6147 document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
6149 argendPlain = find_end_of_inset(document.body, argbeginPlain)
6150 marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
6152 # remove Arg insets and paragraph, if it only contains this inset
6154 document.body[arg - 1] == "\\begin_layout Plain Layout"
6155 and find_end_of_layout(document.body, arg - 1) == endarg + 3
6157 del document.body[arg - 1 : endarg + 4]
6159 del document.body[arg : endarg + 1]
6161 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
6162 endarg = find_end_of_inset(document.body, arg)
6165 argbeginPlain = find_token(
6166 document.body, "\\begin_layout Plain Layout", arg, endarg
6168 if argbeginPlain == -1:
6169 document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
6171 argendPlain = find_end_of_inset(document.body, argbeginPlain)
6172 marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
6174 # remove Arg insets and paragraph, if it only contains this inset
6176 document.body[arg - 1] == "\\begin_layout Plain Layout"
6177 and find_end_of_layout(document.body, arg - 1) == endarg + 3
6179 del document.body[arg - 1 : endarg + 4]
6181 del document.body[arg : endarg + 1]
6183 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
6184 endarg = find_end_of_inset(document.body, arg)
6187 argbeginPlain = find_token(
6188 document.body, "\\begin_layout Plain Layout", arg, endarg
6190 if argbeginPlain == -1:
6191 document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
6193 argendPlain = find_end_of_inset(document.body, argbeginPlain)
6194 marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
6196 # remove Arg insets and paragraph, if it only contains this inset
6198 document.body[arg - 1] == "\\begin_layout Plain Layout"
6199 and find_end_of_layout(document.body, arg - 1) == endarg + 3
6201 del document.body[arg - 1 : endarg + 4]
6203 del document.body[arg : endarg + 1]
6205 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
6206 endarg = find_end_of_inset(document.body, arg)
6209 argbeginPlain = find_token(
6210 document.body, "\\begin_layout Plain Layout", arg, endarg
6212 if argbeginPlain == -1:
6213 document.warning("Malformed LyX document: Can't find arg 4 plain Layout")
6215 argendPlain = find_end_of_inset(document.body, argbeginPlain)
6216 marg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
6218 # remove Arg insets and paragraph, if it only contains this inset
6220 document.body[arg - 1] == "\\begin_layout Plain Layout"
6221 and find_end_of_layout(document.body, arg - 1) == endarg + 3
6223 del document.body[arg - 1 : endarg + 4]
6225 del document.body[arg : endarg + 1]
6227 arg = find_token(document.body, "\\begin_inset Argument post:5", i, j)
6228 endarg = find_end_of_inset(document.body, arg)
6231 argbeginPlain = find_token(
6232 document.body, "\\begin_layout Plain Layout", arg, endarg
6234 if argbeginPlain == -1:
6235 document.warning("Malformed LyX document: Can't find arg 5 plain Layout")
6237 argendPlain = find_end_of_inset(document.body, argbeginPlain)
6238 marg5content = document.body[argbeginPlain + 1 : argendPlain - 2]
6240 # remove Arg insets and paragraph, if it only contains this inset
6242 document.body[arg - 1] == "\\begin_layout Plain Layout"
6243 and find_end_of_layout(document.body, arg - 1) == endarg + 3
6245 del document.body[arg - 1 : endarg + 4]
6247 del document.body[arg : endarg + 1]
6249 arg = find_token(document.body, "\\begin_inset Argument post:6", i, j)
6250 endarg = find_end_of_inset(document.body, arg)
6253 argbeginPlain = find_token(
6254 document.body, "\\begin_layout Plain Layout", arg, endarg
6256 if argbeginPlain == -1:
6257 document.warning("Malformed LyX document: Can't find arg 6 plain Layout")
6259 argendPlain = find_end_of_inset(document.body, argbeginPlain)
6260 marg6content = document.body[argbeginPlain + 1 : argendPlain - 2]
6262 # remove Arg insets and paragraph, if it only contains this inset
6264 document.body[arg - 1] == "\\begin_layout Plain Layout"
6265 and find_end_of_layout(document.body, arg - 1) == endarg + 3
6267 del document.body[arg - 1 : endarg + 4]
6269 del document.body[arg : endarg + 1]
6272 if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
6275 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
6276 endInset = find_end_of_inset(document.body, i)
6277 endPlain = find_end_of_layout(document.body, beginPlain)
6278 precontent = put_cmd_in_ert(cmd)
6279 if len(optargcontent) > 0:
6280 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
6281 precontent += put_cmd_in_ert("{")
6283 postcontent = put_cmd_in_ert("}")
6284 if len(marg1content) > 0:
6285 postcontent += put_cmd_in_ert("[") + marg1content + put_cmd_in_ert("]")
6286 postcontent += put_cmd_in_ert("{") + marg2content + put_cmd_in_ert("}")
6287 if len(marg3content) > 0:
6288 postcontent += put_cmd_in_ert("[") + marg3content + put_cmd_in_ert("]")
6289 postcontent += put_cmd_in_ert("{") + marg4content + put_cmd_in_ert("}")
6290 if cmd == "\\trigloss":
6291 if len(marg5content) > 0:
6292 postcontent += put_cmd_in_ert("[") + marg5content + put_cmd_in_ert("]")
6293 postcontent += put_cmd_in_ert("{") + marg6content + put_cmd_in_ert("}")
6295 document.body[endPlain : endInset + 1] = postcontent
6296 document.body[beginPlain + 1 : beginPlain] = precontent
6297 del document.body[i : beginPlain + 1]
6299 document.append_local_layout("Requires covington")
6304 def revert_exarg2(document):
6305 "Revert linguistic examples with new arguments to ERT"
6307 if not "linguistics" in document.get_module_list():
6312 layouts = ["Numbered Example", "Subexample"]
6314 for layout in layouts:
6317 i = find_token(document.body, "\\begin_layout %s" % layout, i + 1)
6320 j = find_end_of_layout(document.body, i)
6322 document.warning("Malformed LyX document: Can't find end of example layout")
6324 consecex = document.body[i] == "\\begin_layout Numbered Examples (consecutive)"
6325 subexpl = document.body[i] == "\\begin_layout Subexample"
6326 singleex = document.body[i] == "\\begin_layout Numbered Examples (multiline)"
6327 layouttype = "\\begin_layout Numbered Examples (multiline)"
6329 layouttype = "\\begin_layout Numbered Examples (consecutive)"
6331 layouttype = "\\begin_layout Subexample"
6337 m = find_end_of_layout(document.body, k)
6338 # check for consecutive layouts
6339 k = find_token(document.body, "\\begin_layout", m)
6340 if k == -1 or document.body[k] != layouttype:
6342 l = find_end_of_layout(document.body, k)
6344 document.warning("Malformed LyX document: Can't find end of example layout")
6347 arg = find_token(document.body, "\\begin_inset Argument 1", i, l)
6351 != "\\begin_layout " + get_containing_layout(document.body, arg)[0]
6353 # this is not our argument!
6355 if subexpl or arg == -1:
6356 iarg = find_token(document.body, "\\begin_inset Argument item:1", i, l)
6361 endarg = find_end_of_inset(document.body, arg)
6363 argbeginPlain = find_token(
6364 document.body, "\\begin_layout Plain Layout", arg, endarg
6366 if argbeginPlain == -1:
6367 document.warning("Malformed LyX document: Can't find optarg plain Layout")
6369 argendPlain = find_end_of_inset(document.body, argbeginPlain)
6370 optargcontent = lyx2latex(
6371 document, document.body[argbeginPlain + 1 : argendPlain - 2]
6373 # This is a verbatim argument
6374 optargcontent = re.sub(r"textbackslash{}", r"", optargcontent)
6377 iarg = find_token(document.body, "\\begin_inset Argument item:1", i, j)
6379 endiarg = find_end_of_inset(document.body, iarg)
6381 iargbeginPlain = find_token(
6382 document.body, "\\begin_layout Plain Layout", iarg, endiarg
6384 if iargbeginPlain == -1:
6385 document.warning("Malformed LyX document: Can't find optarg plain Layout")
6387 iargendPlain = find_end_of_inset(document.body, iargbeginPlain)
6389 "<" + lyx2latex(document, document.body[iargbeginPlain:iargendPlain]) + ">"
6392 iarg2 = find_token(document.body, "\\begin_inset Argument item:2", i, j)
6394 endiarg2 = find_end_of_inset(document.body, iarg2)
6396 iarg2beginPlain = find_token(
6397 document.body, "\\begin_layout Plain Layout", iarg2, endiarg2
6399 if iarg2beginPlain == -1:
6400 document.warning("Malformed LyX document: Can't find optarg plain Layout")
6402 iarg2endPlain = find_end_of_inset(document.body, iarg2beginPlain)
6405 + lyx2latex(document, document.body[iarg2beginPlain:iarg2endPlain])
6412 # remove Arg insets and paragraph, if it only contains this inset
6415 document.body[arg - 1] == "\\begin_layout Plain Layout"
6416 and find_end_of_layout(document.body, arg - 1) == endarg + 3
6418 del document.body[arg - 1 : endarg + 4]
6420 del document.body[arg : endarg + 1]
6422 iarg = find_token(document.body, "\\begin_inset Argument item:1", i, j)
6424 document.warning("Unable to re-find item:1 Argument")
6426 endiarg = find_end_of_inset(document.body, iarg)
6428 document.body[iarg - 1] == "\\begin_layout Plain Layout"
6429 and find_end_of_layout(document.body, iarg - 1) == endiarg + 3
6431 del document.body[iarg - 1 : endiarg + 4]
6433 del document.body[iarg : endiarg + 1]
6435 iarg2 = find_token(document.body, "\\begin_inset Argument item:2", i, j)
6437 document.warning("Unable to re-find item:2 Argument")
6439 endiarg2 = find_end_of_inset(document.body, iarg2)
6441 document.body[iarg2 - 1] == "\\begin_layout Plain Layout"
6442 and find_end_of_layout(document.body, iarg2 - 1) == endiarg2 + 3
6444 del document.body[iarg2 - 1 : endiarg2 + 4]
6446 del document.body[iarg2 : endiarg2 + 1]
6450 envname = "examples"
6452 envname = "subexamples"
6454 cmd = put_cmd_in_ert("\\begin{" + envname + "}[" + optargcontent + "]")
6456 # re-find end of layout
6457 j = find_end_of_layout(document.body, i)
6459 document.warning("Malformed LyX document: Can't find end of Subexample layout")
6463 # check for consecutive layouts
6464 k = find_token(document.body, "\\begin_layout", l)
6465 if k == -1 or document.body[k] != layouttype:
6469 m = find_end_of_layout(document.body, k)
6470 iarg = find_token(document.body, "\\begin_inset Argument item:1", k, m)
6472 endiarg = find_end_of_inset(document.body, iarg)
6474 iargbeginPlain = find_token(
6475 document.body, "\\begin_layout Plain Layout", iarg, endiarg
6477 if iargbeginPlain == -1:
6479 "Malformed LyX document: Can't find optarg plain Layout"
6482 iargendPlain = find_end_of_inset(document.body, iargbeginPlain)
6485 + lyx2latex(document, document.body[iargbeginPlain:iargendPlain])
6489 iarg2 = find_token(document.body, "\\begin_inset Argument item:2", k, m)
6491 endiarg2 = find_end_of_inset(document.body, iarg2)
6493 iarg2beginPlain = find_token(
6495 "\\begin_layout Plain Layout",
6499 if iarg2beginPlain == -1:
6501 "Malformed LyX document: Can't find optarg plain Layout"
6504 iarg2endPlain = find_end_of_inset(document.body, iarg2beginPlain)
6507 + lyx2latex(document, document.body[iarg2beginPlain:iarg2endPlain])
6511 if subitemarg == "":
6513 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert(
6514 "\\item" + subitemarg
6516 # Refind and remove arg insets
6518 iarg = find_token(document.body, "\\begin_inset Argument item:1", k, m)
6520 document.warning("Unable to re-find item:1 Argument")
6522 endiarg = find_end_of_inset(document.body, iarg)
6524 document.body[iarg - 1] == "\\begin_layout Plain Layout"
6525 and find_end_of_layout(document.body, iarg - 1) == endiarg + 3
6527 del document.body[iarg - 1 : endiarg + 4]
6529 del document.body[iarg : endiarg + 1]
6531 iarg2 = find_token(document.body, "\\begin_inset Argument item:2", k, m)
6533 document.warning("Unable to re-find item:2 Argument")
6535 endiarg2 = find_end_of_inset(document.body, iarg2)
6537 document.body[iarg2 - 1] == "\\begin_layout Plain Layout"
6538 and find_end_of_layout(document.body, iarg2 - 1) == endiarg2 + 3
6540 del document.body[iarg2 - 1 : endiarg2 + 4]
6542 del document.body[iarg2 : endiarg2 + 1]
6544 document.body[k : k + 1] = ["\\begin_layout Standard"]
6545 l = find_end_of_layout(document.body, k)
6547 document.warning("Malformed LyX document: Can't find end of example layout")
6550 endev = put_cmd_in_ert("\\end{" + envname + "}")
6552 document.body[l:l] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
6553 document.body[i : i + 1] = (
6554 ["\\begin_layout Standard"]
6556 + ["\\end_layout", "", "\\begin_layout Standard"]
6557 + put_cmd_in_ert("\\item" + itemarg)
6560 document.append_local_layout("Requires covington")
def revert_cov_options(document):
    """Revert examples item argument structure (covington 617 -> 616).

    Renumbers ``Argument item:2`` back to ``item:1`` in linguistics example
    layouts, and shifts the post-argument numbers of the interlinear gloss
    flex insets back down (post:2/4/6 -> post:1/2/3).
    """

    if "linguistics" not in document.get_module_list():
        return

    layouts = ["Numbered Examples (consecutive)", "Subexample"]

    for layout in layouts:
        i = 0
        while True:
            i = find_token(document.body, "\\begin_layout %s" % layout, i)
            if i == -1:
                break
            j = find_end_of_layout(document.body, i)
            if j == -1:
                document.warning(
                    "Malformed LyX document: Can't find end of example layout at line %d" % i
                )
                i += 1
                continue
            k = find_token(document.body, "\\begin_inset Argument item:2", i, j)
            if k != -1:
                document.body[k] = "\\begin_inset Argument item:1"
            i = j

    # Shift gloss arguments
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Flex Interlinear Gloss (2 Lines)", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(
                "Malformed LyX document: Can't find end of gloss inset at line %d" % i
            )
            i += 1
            continue
        k = find_token(document.body, "\\begin_inset Argument post:2", i, j)
        if k != -1:
            document.body[k] = "\\begin_inset Argument post:1"
        k = find_token(document.body, "\\begin_inset Argument post:4", i, j)
        if k != -1:
            document.body[k] = "\\begin_inset Argument post:2"
        i = j

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Flex Interlinear Gloss (3 Lines)", i)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(
                "Malformed LyX document: Can't find end of gloss inset at line %d" % i
            )
            i += 1
            continue
        k = find_token(document.body, "\\begin_inset Argument post:2", i, j)
        if k != -1:
            document.body[k] = "\\begin_inset Argument post:1"
        k = find_token(document.body, "\\begin_inset Argument post:4", i, j)
        if k != -1:
            document.body[k] = "\\begin_inset Argument post:2"
        k = find_token(document.body, "\\begin_inset Argument post:6", i, j)
        if k != -1:
            document.body[k] = "\\begin_inset Argument post:3"
        i = j
def revert_expreambles(document):
    """Revert covington example preamble/postamble flex insets to ERT.

    Each flex inset is rewritten as the corresponding covington LaTeX
    command in an ERT inset via ``revert_flex_inset``.
    """

    revert_flex_inset(document.body, "Example Preamble", "\\expreamble")
    revert_flex_inset(document.body, "Subexample Preamble", "\\subexpreamble")
    revert_flex_inset(document.body, "Example Postamble", "\\expostamble")
    revert_flex_inset(document.body, "Subexample Postamble", "\\subexpostamble")
def revert_hequotes(document):
    """Revert Hebrew quotation marks to their closest supported equivalents.

    The hebrew quote style ("d") is downgraded to english in the header,
    and every hebrew quote inset in the body is mapped to a polish one.
    """

    i = find_token(document.header, "\\quotes_style hebrew", 0)
    if i != -1:
        document.header[i] = "\\quotes_style english"

    while True:
        i = find_token(document.body, "\\begin_inset Quotes d")
        if i == -1:
            return
        # All four hebrew variants (side l/r x level d/s) are handled, so
        # every match is rewritten and the loop terminates.
        if document.body[i] == "\\begin_inset Quotes dld":
            document.body[i] = "\\begin_inset Quotes prd"
        elif document.body[i] == "\\begin_inset Quotes drd":
            document.body[i] = "\\begin_inset Quotes pld"
        elif document.body[i] == "\\begin_inset Quotes dls":
            document.body[i] = "\\begin_inset Quotes prd"
        elif document.body[i] == "\\begin_inset Quotes drs":
            document.body[i] = "\\begin_inset Quotes pld"
def revert_formatted_refs(document):
    """Remove the \\use_formatted_ref header setting (unknown to older formats)."""
    i = find_token(document.header, "\\use_formatted_ref", 0)
    if i != -1:
        del document.header[i]
def revert_box_fcolor(document):
    """Revert the "default" frame color of boxed insets to "black".

    Older formats have no "default" frame color, so every framed box
    carrying it is rewritten to the previous hard-coded black.
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Box Boxed", i + 1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning(
                "Malformed LyX document: Can't find end of framed box inset at line %d" % i
            )
            continue
        k = find_token(document.body, 'framecolor "default"', i, j)
        if k != -1:
            document.body[k] = 'framecolor "black"'
6692 supported_versions = ["2.4.0", "2.4"]
6694 [545, [convert_lst_literalparam]],
6699 [550, [convert_fontenc]],
6706 [557, [convert_vcsinfo]],
6707 [558, [removeFrontMatterStyles]],
6710 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
6714 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
6715 [566, [convert_hebrew_parentheses]],
6721 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
6722 [573, [convert_inputencoding_namechange]],
6723 [574, [convert_ruby_module, convert_utf8_japanese]],
6724 [575, [convert_lineno, convert_aaencoding]],
6726 [577, [convert_linggloss]],
6730 [581, [convert_osf]],
6737 convert_CantarellFont,
6740 ], # old font re-converterted due to extra options
6746 convert_NotoRegulars,
6747 convert_CrimsonProFont,
6751 [585, [convert_pagesizes]],
6753 [587, [convert_pagesizenames]],
6755 [589, [convert_totalheight]],
6756 [590, [convert_changebars]],
6757 [591, [convert_postpone_fragile]],
6759 [593, [convert_counter_maintenance]],
6762 [596, [convert_parskip]],
6763 [597, [convert_libertinus_rm_fonts]],
6767 [601, [convert_math_refs]],
6768 [602, [convert_branch_colors]],
6771 [605, [convert_vcolumns2]],
6772 [606, [convert_koma_frontispiece]],
6778 [612, [convert_starred_refs]],
6780 [614, [convert_hyper_other]],
6781 [615, [convert_acknowledgment, convert_ack_theorems]],
6782 [616, [convert_empty_macro]],
6783 [617, [convert_cov_options]],
6791 [619, [revert_box_fcolor]],
6792 [618, [revert_formatted_refs]],
6793 [617, [revert_hequotes]],
6794 [616, [revert_expreambles, revert_exarg2, revert_linggloss2, revert_cov_options]],
6795 [615, [revert_empty_macro]],
6796 [614, [revert_ack_theorems, revert_acknowledgment]],
6797 [613, [revert_hyper_other]],
6798 [612, [revert_familydefault]],
6799 [611, [revert_starred_refs]],
6801 [609, [revert_index_macros]],
6802 [608, [revert_document_metadata]],
6803 [607, [revert_docbook_mathml_prefix]],
6804 [606, [revert_spellchecker_ignore]],
6805 [605, [revert_koma_frontispiece]],
6806 [604, [revert_vcolumns2]],
6807 [603, [revert_branch_darkcols]],
6808 [602, [revert_darkmode_graphics]],
6809 [601, [revert_branch_colors]],
6811 [599, [revert_math_refs]],
6812 [598, [revert_hrquotes]],
6813 [598, [revert_nopagebreak]],
6814 [597, [revert_docbook_table_output]],
6815 [596, [revert_libertinus_rm_fonts, revert_libertinus_sftt_fonts]],
6816 [595, [revert_parskip, revert_line_vspaces]],
6817 [594, [revert_ams_spaces]],
6818 [593, [revert_counter_inset]],
6819 [592, [revert_counter_maintenance]],
6820 [591, [revert_colrow_tracking]],
6821 [590, [revert_postpone_fragile]],
6822 [589, [revert_changebars]],
6823 [588, [revert_totalheight]],
6824 [587, [revert_memoir_endnotes, revert_enotez, revert_theendnotes]],
6825 [586, [revert_pagesizenames]],
6826 [585, [revert_dupqualicites]],
6827 [584, [revert_pagesizes, revert_komafontsizes]],
6828 [583, [revert_vcsinfo_rev_abbrev]],
6829 [582, [revert_ChivoFont, revert_CrimsonProFont]],
6830 [581, [revert_CantarellFont, revert_FiraFont]],
6831 [580, [revert_texfontopts, revert_osf]],
6836 revert_plainNotoFonts_xopts,
6837 revert_notoFonts_xopts,
6838 revert_IBMFonts_xopts,
6839 revert_AdobeFonts_xopts,
6842 ], # keep revert_font_opts last!
6843 [578, [revert_babelfont]],
6844 [577, [revert_drs]],
6845 [576, [revert_linggloss, revert_subexarg]],
6846 [575, [revert_new_languages]],
6847 [574, [revert_lineno, revert_aaencoding]],
6848 [573, [revert_ruby_module, revert_utf8_japanese]],
6849 [572, [revert_inputencoding_namechange]],
6850 [571, [revert_notoFonts]],
6851 [570, [revert_cmidruletrimming]],
6852 [569, [revert_bibfileencodings]],
6853 [568, [revert_tablestyle]],
6854 [567, [revert_soul]],
6855 [566, [revert_malayalam]],
6856 [565, [revert_hebrew_parentheses]],
6857 [564, [revert_AdobeFonts]],
6858 [563, [revert_lformatinfo]],
6859 [562, [revert_listpargs]],
6860 [561, [revert_l7ninfo]],
6861 [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
6862 [559, [revert_timeinfo, revert_namenoextinfo]],
6863 [558, [revert_dateinfo]],
6864 [557, [addFrontMatterStyles]],
6865 [556, [revert_vcsinfo]],
6866 [555, [revert_bibencoding]],
6867 [554, [revert_vcolumns]],
6868 [553, [revert_stretchcolumn]],
6869 [552, [revert_tuftecite]],
6870 [551, [revert_floatpclass, revert_floatalignment]],
6871 [550, [revert_nospellcheck]],
6872 [549, [revert_fontenc]],
6873 [548, []], # dummy format change
6874 [547, [revert_lscape]],
6875 [546, [revert_xcharter]],
6876 [545, [revert_paratype]],
6877 [544, [revert_lst_literalparam]],
6881 if __name__ == "__main__":