1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_complete_lines, del_token,
30 find_end_of, find_end_of_inset, find_end_of_layout, find_token,
31 find_token_backwards, find_token_exact, find_re, get_bool_value,
32 get_containing_inset, get_containing_layout, get_option_value, get_value,
35 # find_complete_lines,
36 # find_re, find_substring,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # revert_flex_inset, hex2ratio
46 ####################################################################
47 # Private helper functions
def add_preamble_fonts(document, fontmap):
    """Add collected font-packages with their options to the user preamble.

    fontmap: dict mapping LaTeX package name -> list of package options
             (an empty list means the package is loaded without options).
    """
    # NOTE(review): the loop header and the else-branch were lost in this
    # mangled listing; restored here from the surrounding logic.
    for pkg in fontmap:
        if len(fontmap[pkg]) > 0:
            xoption = "[" + ",".join(fontmap[pkg]) + "]"
        else:
            xoption = ""
        preamble = "\\usepackage%s{%s}" % (xoption, pkg)
        add_to_preamble(document, [preamble])
def createkey(pkg, options):
    """Build the canonical '<pkg>:<opt1>-<opt2>-...' key used to index
    package+options combinations in the font maps."""
    joined_opts = "-".join(options)
    return "{}:{}".format(pkg, joined_opts)
67 self.fontname = None # key into font2pkgmap
68 self.fonttype = None # roman,sans,typewriter,math
69 self.scaletype = None # None,sf,tt
70 self.scaleopt = None # None, 'scaled', 'scale'
74 self.pkgkey = None # key into pkg2fontmap
75 self.osfopt = None # None, string
76 self.osfdef = "false" # "false" or "true"
79 self.pkgkey = createkey(self.package, self.options)
83 self.font2pkgmap = dict()
84 self.pkg2fontmap = dict()
85 self.pkginmap = dict() # defines, if a map for package exists
87 def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None, osfdef = "false"):
88 """Expand fontinfo mapping"""
90 # fontlist: list of fontnames, each element
91 # may contain a ','-separated list of needed options
92 # like e.g. 'IBMPlexSansCondensed,condensed'
93 # font_type: one of 'roman', 'sans', 'typewriter', 'math'
94 # scale_type: one of None, 'sf', 'tt'
95 # pkg: package defining the font. Defaults to fontname if None
96 # scaleopt: one of None, 'scale', 'scaled', or some other string
97 # to be used in scale option (e.g. scaled=0.7)
98 # osfopt: None or some other string to be used in osf option
99 # osfdef: "true" if osf is default
102 fe.fonttype = font_type
103 fe.scaletype = scale_type
106 fe.fontname = font_name
108 fe.scaleopt = scaleopt
112 fe.package = font_name
116 self.font2pkgmap[font_name] = fe
117 if fe.pkgkey in self.pkg2fontmap:
118 # Repeated the same entry? Check content
119 if self.pkg2fontmap[fe.pkgkey] != font_name:
120 document.error("Something is wrong in pkgname+options <-> fontname mapping")
121 self.pkg2fontmap[fe.pkgkey] = font_name
122 self.pkginmap[fe.package] = 1
124 def getfontname(self, pkg, options):
126 pkgkey = createkey(pkg, options)
127 if not pkgkey in self.pkg2fontmap:
129 fontname = self.pkg2fontmap[pkgkey]
130 if not fontname in self.font2pkgmap:
131 document.error("Something is wrong in pkgname+options <-> fontname mapping")
133 if pkgkey == self.font2pkgmap[fontname].pkgkey:
def createFontMapping(fontlist):
    """Create a fontmapping with the info for the known fonts.

    Used by convert_latexFonts(), revert_latexFonts() and friends.
    """
    # * Would be more handy to parse latexFonts file,
    #   but the path to this file is unknown
    # * For now, add DejaVu and IBMPlex only.
    # * Expand, if desired
    # NOTE(review): the dispatch skeleton (fm creation, the 'DejaVu'/'IBM'/
    # 'Noto'/'Fira' branch headers and the final return) was lost in this
    # mangled listing and is restored here.
    fm = fontmapping()
    for font in fontlist:
        if font == 'DejaVu':
            fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
            fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
            fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
        elif font == 'IBM':
            fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
                                  'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
                                  'IBMPlexSerifSemibold,semibold'],
                                 "roman", None, "plex-serif")
            fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
                                  'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
                                  'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
                                 "sans", "sf", "plex-sans", "scale")
            fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
                                  'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
                                  'IBMPlexMonoSemibold,semibold'],
                                 "typewriter", "tt", "plex-mono", "scale")
        elif font == 'Adobe':
            fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
            fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
            fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
        elif font == 'Noto':
            fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
                                  'NotoSerifThin,thin', 'NotoSerifLight,light',
                                  'NotoSerifExtralight,extralight'],
                                 "roman", None, "noto-serif", None, "osf")
            fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
                                  'NotoSansThin,thin', 'NotoSansLight,light',
                                  'NotoSansExtralight,extralight'],
                                 "sans", "sf", "noto-sans", "scaled")
            fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
        elif font == 'Cantarell':
            fm.expandFontMapping(['cantarell,defaultsans'],
                                 "sans", "sf", "cantarell", "scaled", "oldstyle")
        elif font == 'Chivo':
            fm.expandFontMapping(['ChivoThin,thin', 'ChivoLight,light',
                                  'Chivo,regular', 'ChivoMedium,medium'],
                                 "sans", "sf", "Chivo", "scale", "oldstyle")
        elif font == 'CrimsonPro':
            fm.expandFontMapping(['CrimsonPro', 'CrimsonProExtraLight,extralight', 'CrimsonProLight,light',
                                  'CrimsonProMedium,medium'],
                                 "roman", None, "CrimsonPro", None, "lf", "true")
        elif font == 'Fira':
            fm.expandFontMapping(['FiraSans', 'FiraSansBook,book',
                                  'FiraSansThin,thin', 'FiraSansLight,light',
                                  'FiraSansExtralight,extralight',
                                  'FiraSansUltralight,ultralight'],
                                 "sans", "sf", "FiraSans", "scaled", "lf", "true")
            fm.expandFontMapping(['FiraMono'], "typewriter", "tt", "FiraMono", "scaled", "lf", "true")
        elif font == 'libertinus':
            fm.expandFontMapping(['libertinus,serif'], "roman", None, "libertinus", None, "osf")
            fm.expandFontMapping(['libertinusmath'], "math", None, "libertinust1math", None, None)
    return fm
def convert_fonts(document, fm, osfoption = "osf"):
    """Handle font definition (LaTeX preamble -> native)

    Scans document.preamble for \\usepackage lines of font packages known to
    the fontmapping ``fm`` and converts them to native header settings.

    NOTE(review): this listing is missing interior lines (loop heads, guards,
    assignments of ``o``, ``pkg``, ``opt``, ``oscale``, ``has_osf``, ``vals``);
    the indentation below is reconstructed and the block is NOT runnable as
    shown — recover the full function from version control before editing.
    """
    rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
    rscaleopt = re.compile(r'^scaled?=(.*)')

    # Check whether we go beyond font option feature introduction
    haveFontOpts = document.end_format > 580

    # scan the preamble for the next \usepackage line matching rpkg
    i = find_re(document.preamble, rpkg, i+1)
    mo = rpkg.search(document.preamble[i])
    if mo == None or mo.group(2) == None:
    # split the bracketed option list into individual options
    options = mo.group(2).replace(' ', '').split(",")
    while o < len(options):
        if options[o] == osfoption:
        mo = rscaleopt.search(options[o])
    if not pkg in fm.pkginmap:
    # Try with name-option combination first
    # (only one default option supported currently)
    while o < len(options):
        fn = fm.getfontname(pkg, [opt])
    fn = fm.getfontname(pkg, [])
    fn = fm.getfontname(pkg, options)
    del document.preamble[i]
    fontinfo = fm.font2pkgmap[fn]
    if fontinfo.scaletype == None:
    fontscale = "\\font_" + fontinfo.scaletype + "_scale"
    fontinfo.scaleval = oscale
    # transfer old-style-figures state into the header
    if (has_osf and fontinfo.osfdef == "false") or (not has_osf and fontinfo.osfdef == "true"):
        if fontinfo.osfopt == None:
            # NOTE(review): extending a list with a *string* appends single
            # characters ('o','s','f'); presumably append()/extend([..])
            # was intended — verify.
            options.extend(osfoption)
        osf = find_token(document.header, "\\font_osf false")
        osftag = "\\font_osf"
        if osf == -1 and fontinfo.fonttype != "math":
            # Try with newer format
            osftag = "\\font_" + fontinfo.fonttype + "_osf"
            osf = find_token(document.header, osftag + " false")
        document.header[osf] = osftag + " true"
    # drop the marker comment lyx2lyx itself added in front of the package
    if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
        del document.preamble[i-1]
    if fontscale != None:
        j = find_token(document.header, fontscale, 0)
        val = get_value(document.header, fontscale, j)
        scale = "%03d" % int(float(oscale) * 100)
        document.header[j] = fontscale + " " + scale + " " + vals[1]
    # point the \font_<type> header line at the converted font name
    ft = "\\font_" + fontinfo.fonttype
    j = find_token(document.header, ft, 0)
    val = get_value(document.header, ft, j)
    words = val.split() # ! splits also values like '"DejaVu Sans"'
    words[0] = '"' + fn + '"'
    document.header[j] = ft + ' ' + ' '.join(words)
    # store leftover package options in \font_<type>_opts (format > 580)
    if haveFontOpts and fontinfo.fonttype != "math":
        fotag = "\\font_" + fontinfo.fonttype + "_opts"
        fo = find_token(document.header, fotag)
        document.header[fo] = fotag + " \"" + ",".join(options) + "\""
        # Sensible place to insert tag
        fo = find_token(document.header, "\\font_sf_scale")
        document.warning("Malformed LyX document! Missing \\font_sf_scale")
        document.header.insert(fo, fotag + " \"" + ",".join(options) + "\"")
def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False, WithXOpts = False):
    """Revert native font definition to LaTeX

    NOTE(review): this listing is missing interior lines (loop setup,
    ``continue`` branches, ``oldval``/``xval1`` assignments); indentation is
    reconstructed and the block is NOT runnable as shown — recover the full
    function from version control before editing.
    """
    # fonlist := list of fonts created from the same package
    # Empty package means that the font-name is the same as the package-name
    # fontmap (key = package, val += found options) will be filled
    # and used later in add_preamble_fonts() to be added to user-preamble
    rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
    rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
    while i < len(document.header):
        i = find_re(document.header, rfontscale, i+1)
        mo = rfontscale.search(document.header[i])
        ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
        val = get_value(document.header, ft, i)
        words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
        font = words[0].strip('"') # TeX font name has no whitespace
        if not font in fm.font2pkgmap:
        fontinfo = fm.font2pkgmap[font]
        val = fontinfo.package
        if not val in fontmap:
        # transfer any \font_<type>_opts header value into the package options
        if OnlyWithXOpts or WithXOpts:
            if ft == "\\font_math":
            regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
            if ft == "\\font_sans":
                regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
            elif ft == "\\font_typewriter":
                regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
            x = find_re(document.header, regexp, 0)
            if x == -1 and OnlyWithXOpts:
            # We need to use this regex since split() does not handle quote protection
            xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
            opts = xopts[1].strip('"').split(",")
            fontmap[val].extend(opts)
            del document.header[x]
        # reset the header line to the default font
        words[0] = '"default"'
        document.header[i] = ft + ' ' + ' '.join(words)
        if fontinfo.scaleopt != None:
            xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
            mo = rscales.search(xval)
            # set correct scale option
            fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
        # transfer old-style-figures header state into the package options
        if fontinfo.osfopt != None:
            if fontinfo.osfdef == "true":
            osf = find_token(document.header, "\\font_osf " + oldval)
            if osf == -1 and ft != "\\font_math":
                # Try with newer format
                osftag = "\\font_roman_osf " + oldval
                if ft == "\\font_sans":
                    osftag = "\\font_sans_osf " + oldval
                elif ft == "\\font_typewriter":
                    osftag = "\\font_typewriter_osf " + oldval
                osf = find_token(document.header, osftag)
            fontmap[val].extend([fontinfo.osfopt])
        if len(fontinfo.options) > 0:
            fontmap[val].extend(fontinfo.options)
386 ###############################################################################
388 ### Conversion and reversion routines
390 ###############################################################################
def convert_inputencoding_namechange(document):
    """Rename inputencoding settings.

    'auto' becomes 'auto-legacy', 'default' becomes 'auto-legacy-plain'.
    """
    i = find_token(document.header, "\\inputencoding", 0)
    # NOTE(review): the missing-token guard was lost in this listing; restored.
    if i == -1:
        return
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
    """Rename inputencoding settings.

    Inverse of convert_inputencoding_namechange: 'auto-legacy-plain' becomes
    'default', 'auto-legacy' becomes 'auto'.
    """
    i = find_token(document.header, "\\inputencoding", 0)
    # NOTE(review): the missing-token guard was lost in this listing; restored.
    if i == -1:
        return
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")
def convert_notoFonts(document):
    """Handle Noto fonts definition to LaTeX"""

    # Only TeX fonts are converted; with non-TeX fonts there is nothing to do.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['Noto']))
def revert_notoFonts(document):
    """Revert native Noto font definition to LaTeX"""

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # NOTE(review): the fontmap initialization was lost in this listing;
        # restored (revert_fonts fills it, add_preamble_fonts consumes it).
        fontmap = dict()
        fm = createFontMapping(['Noto'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    """Handle DejaVu and IBMPlex fonts definition to LaTeX"""

    # Only TeX fonts are converted; with non-TeX fonts there is nothing to do.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['DejaVu', 'IBM']))
def revert_latexFonts(document):
    """Revert native DejaVu font definition to LaTeX"""

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # NOTE(review): the fontmap initialization was lost in this listing;
        # restored (revert_fonts fills it, add_preamble_fonts consumes it).
        fontmap = dict()
        fm = createFontMapping(['DejaVu', 'IBM'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    """Handle Adobe Source fonts definition to LaTeX"""

    # Only TeX fonts are converted; with non-TeX fonts there is nothing to do.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    convert_fonts(document, createFontMapping(['Adobe']))
def revert_AdobeFonts(document):
    """Revert Adobe Source font definition to LaTeX"""

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # NOTE(review): the fontmap initialization was lost in this listing;
        # restored (revert_fonts fills it, add_preamble_fonts consumes it).
        fontmap = dict()
        fm = createFontMapping(['Adobe'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def removeFrontMatterStyles(document):
    """Remove styles Begin/EndFrontmatter

    Deletes every Begin/EndFrontmatter layout (including trailing blank
    lines) from the document body.
    """
    # NOTE(review): loop setup and guard lines were lost in this listing;
    # restored from the pattern used throughout this module.
    layouts = ['BeginFrontmatter', 'EndFrontmatter']
    tokenend = len('\\begin_layout ')
    i = 0
    while True:
        i = find_token_exact(document.body, '\\begin_layout ', i+1)
        if i == -1:
            return
        layout = document.body[i][tokenend:].strip()
        if layout not in layouts:
            continue
        j = find_end_of_layout(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
            continue
        # also swallow blank lines following the layout
        while document.body[j+1].strip() == '':
            j += 1
        document.body[i:j+1] = []
def addFrontMatterStyles(document):
    """Use styles Begin/EndFrontmatter for elsarticle

    Wraps the run of frontmatter layouts (Title ... Keywords) in
    Begin/EndFrontmatter styles for the elsarticle class.

    NOTE(review): this listing is missing interior lines (guards, the inserted
    note text, loop setup, ``first``/``above``/``below`` assignments);
    indentation is reconstructed and the block is NOT runnable as shown —
    recover the full function from version control before editing.
    """
    if document.textclass != "elsarticle":

    def insertFrontmatter(prefix, line):
        # skip blank lines surrounding the insertion point so the style
        # wraps the content tightly
        while above > 0 and document.body[above-1].strip() == '':
        while document.body[below].strip() == '':
        document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
            '\\begin_inset Note Note',
            '\\begin_layout Plain Layout',
            '\\end_inset', '', '',

    layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
        'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
    tokenend = len('\\begin_layout ')
    i = find_token_exact(document.body, '\\begin_layout ', i+1)
    layout = document.body[i][tokenend:].strip()
    if layout not in layouts:
    k = find_end_of_layout(document.body, i)
    document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
    # insert End after the last frontmatter layout, Begin before the first
    insertFrontmatter('End', k+1)
    insertFrontmatter('Begin', first)
def convert_lst_literalparam(document):
    """Add param literal to include inset"""

    # NOTE(review): loop setup and guard lines were lost in this listing;
    # restored from the pattern used throughout this module.
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
            continue
        # insert the parameter before the first blank line of the inset
        while i < j and document.body[i].strip() != '':
            i += 1
        document.body.insert(i, 'literal "true"')
def revert_lst_literalparam(document):
    """Remove param literal from include inset"""

    # NOTE(review): loop setup and guard lines were lost in this listing;
    # restored from the pattern used throughout this module.
    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
            continue
        del_token(document.body, 'literal', i, j)
def revert_paratype(document):
    """Revert ParaType font definitions to LaTeX

    NOTE(review): this listing is missing interior lines (guards, try/except
    around the float conversion, ``val``/else branches); indentation is
    reconstructed and the block is NOT runnable as shown — recover the full
    function from version control before editing.
    """
    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
        i2 = find_token(document.header, "\\font_sans \"default\"", 0)
        i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
        j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
        sfval = find_token(document.header, "\\font_sf_scale", 0)
        document.warning("Malformed LyX document: Missing \\font_sf_scale.")
        sfscale = document.header[sfval].split()
        document.header[sfval] = " ".join(sfscale)
        sf_scale = float(val)
        document.warning("Invalid font_sf_scale value: " + val)
        # NOTE(review): sf_scale is a float but is compared to the *string*
        # "100.0" — this is always True; presumably 100.0 was intended.
        if sf_scale != "100.0":
            sfoption = "scaled=" + str(sf_scale / 100.0)
        k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
        ttval = get_value(document.header, "\\font_tt_scale", 0)
        ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
        # all three families set -> the combined paratype package suffices
        if i1 != -1 and i2 != -1 and i3!= -1:
            add_to_preamble(document, ["\\usepackage{paratype}"])
            # otherwise load each family's package individually
            add_to_preamble(document, ["\\usepackage{PTSerif}"])
            document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
            add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
            add_to_preamble(document, ["\\usepackage{PTSans}"])
            document.header[j] = document.header[j].replace("PTSans-TLF", "default")
            add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
            add_to_preamble(document, ["\\usepackage{PTMono}"])
            document.header[k] = document.header[k].replace("PTMono-TLF", "default")
def revert_xcharter(document):
    """Revert XCharter font definitions to LaTeX"""

    # NOTE(review): guard lines and the else-branch were lost in this
    # listing; restored from the visible structure.
    i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
    if i == -1:
        return

    # replace unsupported font setting
    document.header[i] = document.header[i].replace("xcharter", "default")
    # no need for preamble code with system fonts
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return

    # transfer old style figures setting to package options
    j = find_token(document.header, "\\font_osf true")
    if j != -1:
        options = "[osf]"
        document.header[j] = "\\font_osf false"
    else:
        options = ""
    add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
def revert_lscape(document):
    """Reverts the landscape environment (Landscape module) to TeX-code"""

    # NOTE(review): loop setup, guards and the else-branch were lost in this
    # listing; restored from the visible structure.
    if not "landscape" in document.get_module_list():
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Malformed LyX document: Can't find end of Landscape inset")
            continue

        if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
            # floating variant needs afterpage to defer the rotation
            document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
            document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
            add_to_preamble(document, ["\\usepackage{afterpage}"])
        else:
            document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
            document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")

        add_to_preamble(document, ["\\usepackage{pdflscape}"])
    document.del_module("landscape")
def convert_fontenc(document):
    """Convert default fontenc setting"""

    i = find_token(document.header, "\\fontencoding global", 0)
    # NOTE(review): the missing-token guard was lost in this listing; restored.
    if i == -1:
        return

    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    """Revert default fontenc setting"""

    i = find_token(document.header, "\\fontencoding auto", 0)
    # NOTE(review): the missing-token guard was lost in this listing; restored.
    if i == -1:
        return

    document.header[i] = document.header[i].replace("auto", "global")
def revert_nospellcheck(document):
    """Remove nospellcheck font info param"""

    # NOTE(review): loop setup, guard and the deletion line were lost in this
    # listing; restored from the pattern used throughout this module.
    i = 0
    while True:
        i = find_token(document.body, '\\nospellcheck', i)
        if i == -1:
            return
        del document.body[i]
def revert_floatpclass(document):
    """Remove float placement params 'document' and 'class'"""

    # NOTE(review): loop setup and the branch bodies were lost in this
    # listing; restored — verify against version control.
    del_token(document.header, "\\float_placement class")

    i = 0
    while True:
        i = find_token(document.body, '\\begin_inset Float', i + 1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        k = find_token(document.body, 'placement class', i, j)
        if k == -1:
            # 'document' placement means "use document default": just drop it
            k = find_token(document.body, 'placement document', i, j)
            if k != -1:
                del document.body[k]
            continue
        # 'class' placement means "use class default", now spelled 'document'
        document.body[k] = 'placement document'
def revert_floatalignment(document):
    """Remove float alignment params

    Replaces per-float 'alignment' parameters with equivalent ERT inside the
    float ("\\raggedright{}", "\\centering{}" or "\\raggedleft{}").

    NOTE(review): this listing is missing interior lines (loop setup, guards,
    deletion of the 'alignment' line, the final increment); indentation is
    reconstructed and the block is NOT runnable as shown — recover the full
    function from version control before editing.
    """
    # document-wide default alignment; delete=True also removes the header line
    galignment = get_value(document.header, "\\float_alignment", delete=True)

    i = find_token(document.body, '\\begin_inset Float', i + 1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
    k = find_token(document.body, 'alignment', i, j)
    alignment = get_value(document.body, "alignment", k)
    if alignment == "document":
        # fall back to the document-wide default
        alignment = galignment
    l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
    document.warning("Can't find float layout!")
    if alignment == "left":
        alcmd = put_cmd_in_ert("\\raggedright{}")
    elif alignment == "center":
        alcmd = put_cmd_in_ert("\\centering{}")
    elif alignment == "right":
        alcmd = put_cmd_in_ert("\\raggedleft{}")
    document.body[l+1:l+1] = alcmd
    # There might be subfloats, so we do not want to move past
    # the end of the inset.
def revert_tuftecite(document):
    r"""Revert \cite commands in tufte classes"""

    # NOTE(review): loop setup and several guard/continue branches were lost
    # in this listing; restored — verify against version control.
    tufte = ["tufte-book", "tufte-handout"]
    if document.textclass not in tufte:
        return

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
        if i == -1:
            break
        j = find_end_of_inset(document.body, i)
        if j == -1:
            document.warning("Can't find end of citation inset at line %d!!" %(i))
            continue
        k = find_token(document.body, "LatexCommand", i, j)
        if k == -1:
            document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
            i = j + 1
            continue
        cmd = get_value(document.body, "LatexCommand", k)
        # only plain \cite needs reverting in the tufte classes
        if cmd != "cite":
            i = j + 1
            continue
        pre = get_quoted_value(document.body, "before", i, j)
        post = get_quoted_value(document.body, "after", i, j)
        key = get_quoted_value(document.body, "key", i, j)
        if not key:
            document.warning("Citation inset at line %d does not have a key!" %(i))
            key = "???"
        # Replace command with ERT
        res = "\\cite"
        if pre:
            res += "[" + pre + "]"
        if post:
            res += "[" + post + "]"
        elif pre:
            # pre without post: keep the empty optional argument so LaTeX
            # interprets pre as the prenote, not the postnote
            res += "[]"
        res += "{" + key + "}"
        document.body[i:j+1] = put_cmd_in_ert([res])
        i = j
def revert_stretchcolumn(document):
    """We remove the column varwidth flags or everything else will become a mess."""
    # NOTE(review): loop setup and guards were lost in this listing; restored
    # from the pattern used throughout this module.
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Tabular", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of tabular.")
            continue
        for k in range(i, j):
            if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
                document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
                document.body[k] = document.body[k].replace(' varwidth="true"', '')
def revert_vcolumns(document):
    """Revert standard columns with line breaks etc.

    NOTE(review): this listing is missing interior lines (loop setup, guards,
    ``col_info``/``begcell``/``vcand``/``needarray``/``needvarwidth``
    initialization, several branch heads); indentation is reconstructed and
    the block is NOT runnable as shown — recover the full function from
    version control before editing.
    """
    i = find_token(document.body, "\\begin_inset Tabular", i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Malformed LyX document: Could not find end of tabular.")

    # Collect necessary column information
    # row/column counts live in the quoted attributes of the <lyxtabular> line
    nrows = int(document.body[i+1].split('"')[3])
    ncols = int(document.body[i+1].split('"')[5])
    for k in range(ncols):
        m = find_token(document.body, "<column", m)
        width = get_option_value(document.body[m], 'width')
        varwidth = get_option_value(document.body[m], 'varwidth')
        alignment = get_option_value(document.body[m], 'alignment')
        special = get_option_value(document.body[m], 'special')
        col_info.append([width, varwidth, alignment, special, m])

    for row in range(nrows):
        for col in range(ncols):
            m = find_token(document.body, "<cell", m)
            multicolumn = get_option_value(document.body[m], 'multicolumn')
            multirow = get_option_value(document.body[m], 'multirow')
            width = get_option_value(document.body[m], 'width')
            rotate = get_option_value(document.body[m], 'rotate')
            # Check for: linebreaks, multipars, non-standard environments
            endcell = find_token(document.body, "</cell>", begcell)
            if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
            elif count_pars_in_inset(document.body, begcell + 2) > 1:
            elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
            if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
                # only plain columns (no width/varwidth/special) can be converted
                if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
                    alignment = col_info[col][2]
                    col_line = col_info[col][4]
                    if alignment == "center":
                        vval = ">{\\centering}"
                    elif alignment == "left":
                        vval = ">{\\raggedright}"
                    elif alignment == "right":
                        vval = ">{\\raggedleft}"
                    vval += "V{\\linewidth}"
                    # splice the special= attribute into the <column ...> tag
                    document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
                # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
                # with newlines, and we do not want that)
                endcell = find_token(document.body, "</cell>", begcell)
                nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
                nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
                nle = find_end_of_inset(document.body, nl)
                del(document.body[nle:nle+1])
                document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
                document.body[nl:nl+1] = put_cmd_in_ert("\\\\")

    if needarray == True:
        add_to_preamble(document, ["\\usepackage{array}"])
    if needvarwidth == True:
        add_to_preamble(document, ["\\usepackage{varwidth}"])
def revert_bibencoding(document):
    """Revert bibliography encoding

    NOTE(review): this listing is missing interior lines (guards, most of the
    ``encodings`` dict including its opening line, loop setup, branch heads);
    indentation is reconstructed and the block is NOT runnable as shown —
    recover the full function from version control before editing.
    """
    i = find_token(document.header, "\\cite_engine", 0)
    document.warning("Malformed document! Missing \\cite_engine")
    engine = get_value(document.header, "\\cite_engine", i)
    # biblatex handles the encoding via a package option, BibTeX via ERT
    if engine in ["biblatex", "biblatex-natbib"]:

    # Map lyx to latex encoding names
    "armscii8" : "armscii8",
    "iso8859-1" : "latin1",
    "iso8859-2" : "latin2",
    "iso8859-3" : "latin3",
    "iso8859-4" : "latin4",
    "iso8859-5" : "iso88595",
    "iso8859-6" : "8859-6",
    "iso8859-7" : "iso-8859-7",
    "iso8859-8" : "8859-8",
    "iso8859-9" : "latin5",
    "iso8859-13" : "latin7",
    "iso8859-15" : "latin9",
    "iso8859-16" : "latin10",
    "applemac" : "applemac",
    "cp437de" : "cp437de",
    "utf8-platex" : "utf8",

    i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
    j = find_end_of_inset(document.body, i)
    document.warning("Can't find end of bibtex inset at line %d!!" %(i))
    encoding = get_quoted_value(document.body, "encoding", i, j)
    # remove encoding line
    k = find_token(document.body, "encoding", i, j)
    if encoding == "default":
    # Re-find inset end line
    j = find_end_of_inset(document.body, i)
    # biblatex: append bibencoding=... to \biblio_options (or create it)
    h = find_token(document.header, "\\biblio_options", 0)
    biblio_options = get_value(document.header, "\\biblio_options", h)
    if not "bibencoding" in biblio_options:
        document.header[h] += ",bibencoding=%s" % encodings[encoding]
    bs = find_token(document.header, "\\biblatex_bibstyle", 0)
    # this should not happen
    document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
    document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
    # BibTeX: wrap the inset in \bgroup\inputencoding{...} ... \egroup ERT
    document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
    document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
def convert_vcsinfo(document):
    """Separate vcs Info inset from buffer Info inset."""

    # NOTE(review): dict delimiters, loop setup and guards were lost in this
    # listing; restored — verify against version control.
    types = {
        "vcs-revision" : "revision",
        "vcs-tree-revision" : "tree-revision",
        "vcs-author" : "author",
        "vcs-time" : "time",
        "vcs-date" : "date",
    }
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "buffer":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv not in list(types.keys()):
            continue
        document.body[tp] = "type \"vcs\""
        document.body[arg] = "arg \"" + types[argv] + "\""
def revert_vcsinfo(document):
    """Merge vcs Info inset to buffer Info inset."""

    # NOTE(review): loop setup and guards were lost in this listing;
    # restored — verify against version control.
    args = ["revision", "tree-revision", "author", "time", "date" ]
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "vcs":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv not in args:
            document.warning("Malformed Info inset. Invalid vcs arg.")
            continue
        document.body[tp] = "type \"buffer\""
        document.body[arg] = "arg \"vcs-" + argv + "\""
def revert_vcsinfo_rev_abbrev(document):
    """Convert abbreviated revisions to regular revisions."""

    # NOTE(review): loop setup and guards were lost in this listing;
    # restored — verify against version control.
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "buffer":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if( argv == "revision-abbrev" ):
            document.body[arg] = "arg \"revision\""
def revert_dateinfo(document):
    """Revert date info insets to static text.

    Replaces each Info inset of type date/fixdate/moddate in the body with
    the date rendered as plain text, using per-language strftime patterns.
    """
    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.
    # The date formats for each language using strftime syntax:
    # long, short, loclong, locmedium, locshort
    # NOTE(review): the line opening this table (presumably "dateformats = {")
    # and its closing brace are not present in this copy of the file — confirm
    # against the pristine source.
        "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
        "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
        "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
        "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
        "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
        "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
        "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
        "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
        "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
        "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
        "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
        "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
        "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
        "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
        "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
        "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
        "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
        "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
        "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
        "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
        "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
        "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
        "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
        "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
        "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
        "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
        "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
        "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
        "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
        "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
        "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
        "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
        "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B %de %Y", "%d %b %Y", "%d/%m/%Y"],
        "spanish-mexico" : ["%A, %d de %B %de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
        "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
        "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
        "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
        "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
        "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
        "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
        "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
        "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
        "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
        "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
        "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
    # Info inset types handled here: current date, fixed date, modification date.
    types = ["date", "fixdate", "moddate" ]
    lang = get_value(document.header, "\\language")
        document.warning("Malformed LyX document! No \\language header found!")
        # Walk all Info insets in the body.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        # Read the inset's "type" parameter; only date-flavoured insets are handled.
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
        # "arg" holds the requested output format (for fixdate: "format@ISO-date").
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if tpv == "fixdate":
            # fixdate insets carry the fixed ISO date after an '@' separator.
            datecomps = argv.split('@')
            if len(datecomps) > 1:
                isodate = datecomps[1]
                m = re.search(r'(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
                    dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        #     dte = date.fromtimestamp(os.path.getmtime(document.dir))
            # NOTE(review): datetime.date has no isodate() method — this line
            # would raise AttributeError; isoformat() was presumably intended.
            # Confirm and fix.
            result = dte.isodate()
        elif argv == "long":
            result = dte.strftime(dateformats[lang][0])
        elif argv == "short":
            result = dte.strftime(dateformats[lang][1])
        elif argv == "loclong":
            result = dte.strftime(dateformats[lang][2])
        elif argv == "locmedium":
            result = dte.strftime(dateformats[lang][3])
        elif argv == "locshort":
            result = dte.strftime(dateformats[lang][4])
            # Custom Qt-style format string: translate to strftime directives.
            # NOTE(review): "MMMM" (long month name in Qt) is mapped to %b
            # (abbreviated) rather than %B — verify this is intentional.
            fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
            fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
            fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
            # Lone 'd' not preceded by quote or '%': day-of-month.
            fmt = re.sub('[^\'%]d', '%d', fmt)
            fmt = fmt.replace("'", "")
            result = dte.strftime(fmt)
        if sys.version_info < (3,0):
            # In Python 2, datetime module works with binary strings,
            # our dateformat strings are utf8-encoded:
            result = result.decode('utf-8')
        # Replace the whole inset (inclusive) with the rendered date text.
        document.body[i : j+1] = [result]
def revert_timeinfo(document):
    """Revert time info insets to static text.

    Replaces each Info inset of type time/fixtime/modtime in the body with
    the time rendered as plain text, using per-language strftime patterns.
    """
    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.
    # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
    # The time formats for each language using strftime syntax:
    # long, short
    # NOTE(review): the line opening this table (presumably "timeformats = {")
    # and its closing brace are not present in this copy of the file — confirm
    # against the pristine source.
        "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
        "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
        "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "armenian" : ["%H:%M:%S %Z", "%H:%M"],
        "asturian" : ["%H:%M:%S %Z", "%H:%M"],
        "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "austrian" : ["%H:%M:%S %Z", "%H:%M"],
        "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
        "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
        "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
        "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
        "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
        "breton" : ["%H:%M:%S %Z", "%H:%M"],
        "british" : ["%H:%M:%S %Z", "%H:%M"],
        "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
        "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "canadien" : ["%H:%M:%S %Z", "%H h %M"],
        "catalan" : ["%H:%M:%S %Z", "%H:%M"],
        "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
        "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
        "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
        "czech" : ["%H:%M:%S %Z", "%H:%M"],
        "danish" : ["%H.%M.%S %Z", "%H.%M"],
        "divehi" : ["%H:%M:%S %Z", "%H:%M"],
        "dutch" : ["%H:%M:%S %Z", "%H:%M"],
        "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "estonian" : ["%H:%M:%S %Z", "%H:%M"],
        "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
        "finnish" : ["%H.%M.%S %Z", "%H.%M"],
        "french" : ["%H:%M:%S %Z", "%H:%M"],
        "friulan" : ["%H:%M:%S %Z", "%H:%M"],
        "galician" : ["%H:%M:%S %Z", "%H:%M"],
        "georgian" : ["%H:%M:%S %Z", "%H:%M"],
        "german" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
        "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
        "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
        "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
        "irish" : ["%H:%M:%S %Z", "%H:%M"],
        "italian" : ["%H:%M:%S %Z", "%H:%M"],
        "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
        "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
        "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
        "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "latvian" : ["%H:%M:%S %Z", "%H:%M"],
        "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
        "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
        "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
        "magyar" : ["%H:%M:%S %Z", "%H:%M"],
        "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
        "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
        "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
        "norsk" : ["%H:%M:%S %Z", "%H:%M"],
        "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "occitan" : ["%H:%M:%S %Z", "%H:%M"],
        "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "polish" : ["%H:%M:%S %Z", "%H:%M"],
        "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
        "romanian" : ["%H:%M:%S %Z", "%H:%M"],
        "romansh" : ["%H:%M:%S %Z", "%H:%M"],
        "russian" : ["%H:%M:%S %Z", "%H:%M"],
        "samin" : ["%H:%M:%S %Z", "%H:%M"],
        "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
        "scottish" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
        "slovak" : ["%H:%M:%S %Z", "%H:%M"],
        "slovene" : ["%H:%M:%S %Z", "%H:%M"],
        "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
        "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
        "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "syriac" : ["%H:%M:%S %Z", "%H:%M"],
        "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
        "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "turkish" : ["%H:%M:%S %Z", "%H:%M"],
        "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
        "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
        "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
        "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
        "welsh" : ["%H:%M:%S %Z", "%H:%M"]
    # Info inset types handled here: current time, fixed time, modification time.
    types = ["time", "fixtime", "modtime" ]
    i = find_token(document.header, "\\language", 0)
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
    lang = get_value(document.header, "\\language", i)
        # Walk all Info insets in the body.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
        # "arg" holds the requested output format (for fixtime: "format@ISO-time").
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        dtme = datetime.now()
        if tpv == "fixtime":
            # fixtime insets carry the fixed time after an '@' separator,
            # either HH:MM:SS or HH:MM.
            timecomps = argv.split('@')
            if len(timecomps) > 1:
                isotime = timecomps[1]
                m = re.search(r'(\d\d):(\d\d):(\d\d)', isotime)
                    tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
                    m = re.search(r'(\d\d):(\d\d)', isotime)
                        tme = time(int(m.group(1)), int(m.group(2)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        #     dte = date.fromtimestamp(os.path.getmtime(document.dir))
            result = tme.isoformat()
        elif argv == "long":
            result = tme.strftime(timeformats[lang][0])
        elif argv == "short":
            result = tme.strftime(timeformats[lang][1])
            # Custom Qt-style format string: translate to strftime directives.
            fmt = argv.replace("HH", "%H").replace("H", "%H").replace("hh", "%I").replace("h", "%I")
            fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
            fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
            fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
            fmt = fmt.replace("'", "")
            # NOTE(review): 'dte' is never assigned in this function (the
            # sibling revert_dateinfo uses that name); 'tme' looks intended
            # here — confirm and fix.
            result = dte.strftime(fmt)
        # NOTE(review): unlike revert_dateinfo (which splices [result]), this
        # assigns the bare string to a list slice, inserting one character per
        # body line — probably should be [result]. Confirm.
        document.body[i : j+1] = result
def revert_namenoextinfo(document):
    """Merge buffer Info inset type name-noext to name.

    Rewrites the "arg" parameter of buffer Info insets from "name-noext"
    to "name", since the name-noext variant is not known to older LyX.
    """
    # NOTE(review): the loop scaffolding (counter init, "while True:", and the
    # -1 guards) is not present in this copy of the file — 'i' is read before
    # any visible assignment. Confirm against the pristine source.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        # Only buffer-type insets are candidates.
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "name-noext":
        # Downgrade name-noext to plain name.
        document.body[arg] = "arg \"name\""
def revert_l7ninfo(document):
    """Revert l7n Info inset to text.

    Replaces each localization (l7n) Info inset with its argument string,
    stripped of GUI decorations (trailing colons, menu/Qt accelerators).
    """
    # NOTE(review): the loop scaffolding (counter init, "while True:", and the
    # -1 guards) is not present in this copy of the file. Confirm against the
    # pristine source.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
        argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
        # NOTE(review): assigning a bare string to a list slice splices one
        # character per body line; [argv] may have been intended — confirm.
        document.body[i : j+1] = argv
def revert_listpargs(document):
    """Reverts listpreamble arguments to TeX-code.

    Each "Argument listpreamble:" inset is removed and its contents are
    re-inserted at the start of the containing paragraph as a brace-wrapped
    ERT inset.
    """
    # NOTE(review): the loop scaffolding and the assignment of 'parbeg'
    # (presumably taken from the 'parent' tuple) are not present in this copy
    # of the file. Confirm against the pristine source.
        i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
        j = find_end_of_inset(document.body, i)
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
        # Extract the inset's Plain Layout contents.
        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
        endPlain = find_end_of_layout(document.body, beginPlain)
        content = document.body[beginPlain + 1 : endPlain]
        # Drop the Argument inset, then re-insert its contents as ERT at the
        # start of the paragraph.
        del document.body[i:j+1]
        subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
                 "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
        document.body[parbeg : parbeg] = subst
def revert_lformatinfo(document):
    """Revert layout format Info inset to text.

    Replaces lyxinfo/layoutformat Info insets with the literal layout format
    number understood by the older file format ("69").
    """
    # NOTE(review): the loop scaffolding (counter init, "while True:", and the
    # -1 guards / continue statements) is not present in this copy of the
    # file. Confirm against the pristine source.
        i = find_token(document.body, "\\begin_inset Info", i+1)
        j = find_end_of_inset(document.body, i+1)
            document.warning("Malformed LyX document: Could not find end of Info inset.")
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "lyxinfo":
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "layoutformat":
        # NOTE(review): assigning the bare string "69" to a list slice splices
        # it as two one-character lines ('6', '9'); ["69"] may have been
        # intended — confirm.
        document.body[i : j+1] = "69"
def convert_hebrew_parentheses(document):
    """ Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.
    """
    # print("convert hebrew parentheses")
    # Track the language per layout nesting level; the document language is
    # the outermost entry.
    # NOTE(review): the initialization of the loop counter 'i' (and the
    # "i += 1" advancing it) is not present in this copy of the file.
    current_languages = [document.language]
    while i < len(document.body):
        line = document.body[i]
        if line.startswith('\\lang '):
            # NOTE(review): str.lstrip strips a *character set*, not a prefix;
            # language names starting with characters from "\\lang " (e.g.
            # "ngerman") get mangled. Harmless for the 'hebrew' comparison
            # below, but line[len('\\lang '):] would be more robust — confirm.
            current_languages[-1] = line.lstrip('\\lang ')
        elif line.startswith('\\begin_layout'):
            # New nesting level inherits the current language.
            current_languages.append(current_languages[-1])
            # print (line, current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif line.startswith('\\begin_inset Formula'):
            # In math, parentheses must not be changed
            i = find_end_of_inset(document.body, i)
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            # Swap '(' and ')' using NUL as a temporary placeholder.
            document.body[i] = line.replace('(','\x00').replace(')','(').replace('\x00',')')
def revert_hebrew_parentheses(document):
    """Swap Hebrew parentheses back to their pre-2.4 storage order.

    The swap performed by convert_hebrew_parentheses is its own inverse,
    so reverting simply delegates to it; this wrapper exists only to keep
    the convert/revert naming convention.
    """
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    """Set the document language to English but assure Malayalam output.

    Delegates to revert_language with an empty babel name and "malayalam"
    as both the LyX and polyglossia language name.
    """
    revert_language(document, "malayalam", "", "malayalam")
def revert_soul(document):
    """Revert soul module flex insets to ERT.

    Loads the soul package (and color for Highlight) in the preamble when
    any of the module's flex insets is used, then rewrites each flex inset
    to the corresponding soul command.
    """
    # NOTE(review): the "for flex in flexes:" loop and the "if i != -1:"
    # guards around the preamble additions are not present in this copy of
    # the file. Confirm against the pristine source.
    flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]
        i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
            add_to_preamble(document, ["\\usepackage{soul}"])
    i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
        # Highlight additionally needs the color package.
        add_to_preamble(document, ["\\usepackage{color}"])
    # Rewrite each flex inset to its soul command.
    revert_flex_inset(document.body, "Spaceletters", "\\so")
    revert_flex_inset(document.body, "Strikethrough", "\\st")
    revert_flex_inset(document.body, "Underline", "\\ul")
    revert_flex_inset(document.body, "Highlight", "\\hl")
    revert_flex_inset(document.body, "Capitalize", "\\caps")
def revert_tablestyle(document):
    """Remove tablestyle params.

    Deletes the \\tablestyle header line, which older formats do not know.
    """
    # NOTE(review): the "if i != -1:" guard before the deletion is not present
    # in this copy of the file. Confirm against the pristine source.
    i = find_token(document.header, "\\tablestyle")
        del document.header[i]
def revert_bibfileencodings(document):
    """Revert individual Biblatex bibliography encodings.

    For biblatex documents, per-bibfile encodings are moved from the bibtex
    inset into \\addbibresource preamble lines with a bibencoding option;
    the inset itself is wrapped in a Note and replaced by an ERT
    \\printbibliography command.
    """
    # NOTE(review): several guard/continue lines, the opening of the encoding
    # map (presumably "encodings = {"), its closing brace, and the loop
    # scaffolding are not present in this copy of the file. Confirm against
    # the pristine source.
    i = find_token(document.header, "\\cite_engine", 0)
        document.warning("Malformed document! Missing \\cite_engine")
        engine = get_value(document.header, "\\cite_engine", i)
    # Only biblatex-based engines support per-file encodings.
    if engine in ["biblatex", "biblatex-natbib"]:
    # Map lyx to latex encoding names
        "armscii8" : "armscii8",
        "iso8859-1" : "latin1",
        "iso8859-2" : "latin2",
        "iso8859-3" : "latin3",
        "iso8859-4" : "latin4",
        "iso8859-5" : "iso88595",
        "iso8859-6" : "8859-6",
        "iso8859-7" : "iso-8859-7",
        "iso8859-8" : "8859-8",
        "iso8859-9" : "latin5",
        "iso8859-13" : "latin7",
        "iso8859-15" : "latin9",
        "iso8859-16" : "latin10",
        "applemac" : "applemac",
        "cp437de" : "cp437de",
        "cp1250" : "cp1250",
        "cp1251" : "cp1251",
        "cp1252" : "cp1252",
        "cp1255" : "cp1255",
        "cp1256" : "cp1256",
        "cp1257" : "cp1257",
        "koi8-r" : "koi8-r",
        "koi8-u" : "koi8-u",
        "utf8-platex" : "utf8",
        # Walk all bibtex insets.
        i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
        j = find_end_of_inset(document.body, i)
            document.warning("Can't find end of bibtex inset at line %d!!" %(i))
        # Per-file encodings are stored tab-separated as "<file> <encoding>".
        encodings = get_quoted_value(document.body, "file_encodings", i, j)
        bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
        opts = get_quoted_value(document.body, "biblatexopts", i, j)
        if len(bibfiles) == 0:
            document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
        # remove encoding line
        k = find_token(document.body, "file_encodings", i, j)
            del document.body[k]
        # Re-find inset end line
        j = find_end_of_inset(document.body, i)
        # Build a file -> encoding map from the tab-separated list.
        enclist = encodings.split("\t")
            ppp = pp.split(" ", 1)
            encmap[ppp[0]] = ppp[1]
        # Emit one \addbibresource per bibfile, with bibencoding where known.
        for bib in bibfiles:
            pr = "\\addbibresource"
            if bib in encmap.keys():
                pr += "[bibencoding=" + encmap[bib] + "]"
            pr += "{" + bib + "}"
            add_to_preamble(document, [pr])
        # Insert ERT \\printbibliography and wrap bibtex inset to a Note
        pcmd = "printbibliography"
            pcmd += "[" + opts + "]"
        repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
                "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
                "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
                "status open", "", "\\begin_layout Plain Layout" ]
        repl += document.body[i:j+1]
        repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
        document.body[i:j+1] = repl
def revert_cmidruletrimming(document):
    """Remove \\cmidrule trimming.

    Strips the (bottom|top)line[lr]trim="true" attributes from table cells,
    which older formats do not understand.
    """
    # FIXME: Revert to TeX code?
    # NOTE(review): the loop scaffolding and -1/-trim guards are not present
    # in this copy of the file. Confirm against the pristine source.
        # first, let's find out if we need to do anything
        i = find_token(document.body, '<cell ', i+1)
        j = document.body[i].find('trim="')
        rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
        # remove trim option
        document.body[i] = rgx.sub('', document.body[i])
    # Local InsetLayout definition for the Ruby (furigana) flex inset,
    # inserted/removed by convert_ruby_module / revert_ruby_module below.
    # NOTE(review): the opening "ruby_inset_def = [" line and the closing
    # bracket are not present in this copy of the file.
    r'### Inserted by lyx2lyx (ruby inset) ###',
    r'InsetLayout Flex:Ruby',
    r' LyxType charstyle',
    r' LatexType command',
    r' HTMLInnerTag rb',
    r' HTMLInnerAttr ""',
    r' LabelString "Ruby"',
    r' Decoration Conglomerate',
    # Preamble: pick a ruby implementation per TeX engine.
    r' \ifdefined\kanjiskip',
    r' \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
    r' \else \ifdefined\luatexversion',
    r' \usepackage{luatexja-ruby}',
    r' \else \ifdefined\XeTeXversion',
    r' \usepackage{ruby}%',
    # Fallback definition when no ruby package is available.
    r' \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
    r' Argument post:1',
    r' LabelString "ruby text"',
    r' MenuString "Ruby Text|R"',
    r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
    r' Decoration Conglomerate',
def convert_ruby_module(document):
    """Use ruby module instead of local module definition.

    If the document carries the lyx2lyx-inserted local Ruby inset layout,
    drop it and enable the stock "ruby" module instead.
    """
    # del_local_layout reports whether the local copy was actually present.
    had_local_def = document.del_local_layout(ruby_inset_def)
    if had_local_def:
        document.add_module("ruby")
def revert_ruby_module(document):
    """Replace ruby module with local module definition.

    If the stock "ruby" module is enabled, remove it and append the
    equivalent local inset layout so older LyX can still render the inset.
    """
    # del_module reports whether the module was actually enabled.
    had_module = document.del_module("ruby")
    if had_module:
        document.append_local_layout(ruby_inset_def)
def convert_utf8_japanese(document):
    """Use generic utf8 with Japanese documents.

    Rewrites the Japanese-specific utf8 input encoding variants
    (utf8-platex / utf8-cjk) to plain utf8.
    """
    lang = get_value(document.header, "\\language")
    # NOTE(review): the body of this guard (presumably "return") is not
    # present in this copy of the file.
    if not lang.startswith("japanese"):
    inputenc = get_value(document.header, "\\inputencoding")
    # Each Japanese flavour has its own legacy utf8 variant.
    if ((lang == "japanese" and inputenc == "utf8-platex")
        or (lang == "japanese-cjk" and inputenc == "utf8-cjk")):
        document.set_parameter("inputencoding", "utf8")
def revert_utf8_japanese(document):
    """Use Japanese utf8 variants with Japanese documents.

    Rewrites plain utf8 back to the encoding variant matching the Japanese
    language flavour (utf8-platex for japanese, utf8-cjk for japanese-cjk).
    """
    inputenc = get_value(document.header, "\\inputencoding")
    # NOTE(review): the body of this guard (presumably "return") is not
    # present in this copy of the file.
    if inputenc != "utf8":
    lang = get_value(document.header, "\\language")
    if lang == "japanese":
        document.set_parameter("inputencoding", "utf8-platex")
    if lang == "japanese-cjk":
        document.set_parameter("inputencoding", "utf8-cjk")
def revert_lineno(document):
    " Replace lineno setting with user-preamble code."
    # Remove the \lineno_options and \use_lineno headers and, when line
    # numbering was enabled, load the lineno package in the preamble.
    # NOTE(review): the continuation lines closing both calls (the
    # get_quoted_value(...) and add_to_preamble(...) argument lists) are not
    # present in this copy of the file. Confirm against the pristine source.
    options = get_quoted_value(document.header, "\\lineno_options",
    if not get_bool_value(document.header, "\\use_lineno", delete=True):
        options = "[" + options + "]"
    add_to_preamble(document, ["\\usepackage%s{lineno}" % options,
def convert_lineno(document):
    " Replace user-preamble code with native lineno support."
    # Detect a lyx2lyx-generated "\usepackage{lineno} + \linenumbers"
    # preamble snippet, remove it, and emit \use_lineno (plus any package
    # options as \lineno_options) in the header instead.
    # NOTE(review): the lines assigning 'use_lineno' and several guards are
    # not present in this copy of the file. Confirm against the pristine
    # source.
    i = find_token(document.preamble, "\\linenumbers", 1)
    # The \usepackage line is expected immediately before \linenumbers.
    usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
        options = usepkg.group(1).strip("[]")
        del(document.preamble[i-1:i+1])
        del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)
    # Insert the new header lines before \index.
    k = find_token(document.header, "\\index ")
        document.header[k:k] = ["\\use_lineno %d" % use_lineno]
        document.header[k:k] = ["\\use_lineno %d" % use_lineno,
                                "\\lineno_options %s" % options]
def convert_aaencoding(document):
    " Convert default document option due to encoding change in aa class. "
    # The aa class switched its default encoding; documents relying on the
    # old default must spell out latin9 in \options.
    # NOTE(review): the guard bodies ("return") and -1 checks are not present
    # in this copy of the file. Confirm against the pristine source.
    if document.textclass != "aa":
    i = find_token(document.header, "\\use_default_options true")
    val = get_value(document.header, "\\inputencoding")
        document.warning("Malformed LyX Document! Missing '\\inputencoding' header.")
    if val == "auto-legacy" or val == "latin9":
        # Stop using default options and pin latin9 explicitly.
        document.header[i] = "\\use_default_options false"
        k = find_token(document.header, "\\options")
            document.header.insert(i, "\\options latin9")
            document.header[k] += ",latin9"
def revert_aaencoding(document):
    " Revert default document option due to encoding change in aa class. "
    # Inverse of convert_aaencoding: pin utf8 explicitly for older aa.
    # NOTE(review): the guard bodies ("return") and -1 checks are not present
    # in this copy of the file. Confirm against the pristine source.
    if document.textclass != "aa":
    i = find_token(document.header, "\\use_default_options true")
    val = get_value(document.header, "\\inputencoding")
        document.warning("Malformed LyX Document! Missing \\inputencoding header.")
    # Stop using default options and add utf8 to \options.
    document.header[i] = "\\use_default_options false"
    k = find_token(document.header, "\\options", 0)
        document.header.insert(i, "\\options utf8")
        document.header[k] = document.header[k] + ",utf8"
def revert_new_languages(document):
    """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
    and Russian (Petrine orthography)."""
    # lyxname: (babelname, polyglossianame)
    # NOTE(review): the closing brace of this dict and the loop scaffolding
    # below are not present in this copy of the file. Confirm against the
    # pristine source.
    new_languages = {"azerbaijani": ("azerbaijani", ""),
                     "bengali": ("", "bengali"),
                     "churchslavonic": ("", "churchslavonic"),
                     "oldrussian": ("", "russian"),
                     "korean": ("", "korean"),
    # Collect which of the new languages the document actually uses,
    # starting with the main document language.
    if document.language in new_languages:
        used_languages = {document.language}
        used_languages = set()
        # Scan \lang switches in the body.
        i = find_token(document.body, "\\lang", i+1)
        val = get_value(document.body, "\\lang", i)
        if val in new_languages:
            used_languages.add(val)
    # Korean is already supported via CJK, so leave as-is for Babel
    if ("korean" in used_languages
        and (not get_bool_value(document.header, "\\use_non_tex_fonts")
             or get_value(document.header, "\\language_package") == "babel")):
        used_languages.discard("korean")
    # Emulate each remaining language via revert_language.
    for lang in used_languages:
        revert_language(document, lang, *new_languages[lang])
    # Legacy InsetLayout definition for the deprecated Flex:Glosse inset,
    # appended by revert_linggloss / removed by convert_linggloss.
    # NOTE(review): the opening "gloss_inset_def = [" line and the closing
    # bracket are not present in this copy of the file.
    r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
    r'InsetLayout Flex:Glosse',
    r' LabelString "Gloss (old version)"',
    r' MenuString "Gloss (old version)"',
    r' LatexType environment',
    r' LatexName linggloss',
    r' Decoration minimalistic',
    r' CustomPars false',
    r' ForcePlain true',
    r' ParbreakIsNewline true',
    r' FreeSpacing true',
    r' Requires covington',
    # Preamble: define linggloss only if covington has not already done so.
    r' \@ifundefined{linggloss}{%',
    r' \newenvironment{linggloss}[2][]{',
    r' \def\glosstr{\glt #1}%',
    r' {\glosstr\glend}}{}',
    r' ResetsFont true',
    r' Decoration conglomerate',
    r' LabelString "Translation"',
    r' MenuString "Glosse Translation|s"',
    r' Tooltip "Add a translation for the glosse"',
# Legacy InsetLayout definition for the deprecated Flex:Tri-Glosse inset,
# appended by revert_linggloss / removed by convert_linggloss.
# NOTE(review): the closing bracket of this list is not present in this copy
# of the file.
glosss_inset_def = [
    r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
    r'InsetLayout Flex:Tri-Glosse',
    r' LabelString "Tri-Gloss (old version)"',
    r' MenuString "Tri-Gloss (old version)"',
    r' LatexType environment',
    r' LatexName lingglosss',
    r' Decoration minimalistic',
    r' CustomPars false',
    r' ForcePlain true',
    r' ParbreakIsNewline true',
    r' FreeSpacing true',
    r' Requires covington',
    # Preamble: define lingglosss only if covington has not already done so.
    r' \@ifundefined{lingglosss}{%',
    r' \newenvironment{lingglosss}[2][]{',
    r' \def\glosstr{\glt #1}%',
    r' {\glosstr\glend}}{}',
    r' ResetsFont true',
    r' Decoration conglomerate',
    r' LabelString "Translation"',
    r' MenuString "Glosse Translation|s"',
    r' Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    """Move old ling glosses to local layout.

    If the body still contains the deprecated Flex Glosse or Flex Tri-Glosse
    insets, append the matching legacy inset definition to the document's
    local layout so the insets keep rendering.
    """
    legacy_insets = (
        ('\\begin_inset Flex Glosse', gloss_inset_def),
        ('\\begin_inset Flex Tri-Glosse', glosss_inset_def),
    )
    for token, inset_def in legacy_insets:
        if find_token(document.body, token, 0) != -1:
            document.append_local_layout(inset_def)
1984 def revert_linggloss(document):
1985 " Revert to old ling gloss definitions "
# Only documents using the "linguistics" module carry the new-style
# interlinear gloss insets.  (The guard's body is in a gap of this
# listing -- presumably an early `return`.)
1986 if not "linguistics" in document.get_module_list():
# Drop the deprecated local-layout definitions convert_linggloss() may
# have added on the way up.
1988 document.del_local_layout(gloss_inset_def)
1989 document.del_local_layout(glosss_inset_def)
# Rewrite each new-style gloss inset (2- and 3-line variants) into ERT.
1992 glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
1993 for glosse in glosses:
# NOTE(review): the surrounding loop bookkeeping (initialisation of `i`,
# the `while True:` and the `-1` break checks) sits in gaps of this
# listing; the embedded numbering jumps 1993->1996->1999.
1996 i = find_token(document.body, glosse, i+1)
1999 j = find_end_of_inset(document.body, i)
2001 document.warning("Malformed LyX document: Can't find end of Gloss inset")
# Optional argument (Argument 1): extract its Plain Layout content.
2004 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2005 endarg = find_end_of_inset(document.body, arg)
2008 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2009 if argbeginPlain == -1:
2010 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2012 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2013 optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
2015 # remove Arg insets and paragraph, if it only contains this inset
2016 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2017 del document.body[arg - 1 : endarg + 4]
2019 del document.body[arg : endarg + 1]
# Mandatory argument post:1 (first gloss line), same extraction pattern.
2021 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2022 endarg = find_end_of_inset(document.body, arg)
2025 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2026 if argbeginPlain == -1:
2027 document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
2029 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2030 marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2032 # remove Arg insets and paragraph, if it only contains this inset
2033 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2034 del document.body[arg - 1 : endarg + 4]
2036 del document.body[arg : endarg + 1]
# Mandatory argument post:2 (second gloss line).
2038 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2039 endarg = find_end_of_inset(document.body, arg)
2042 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2043 if argbeginPlain == -1:
2044 document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
2046 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2047 marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2049 # remove Arg insets and paragraph, if it only contains this inset
2050 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2051 del document.body[arg - 1 : endarg + 4]
2053 del document.body[arg : endarg + 1]
# Mandatory argument post:3 (third line; only used by the 3-line gloss).
2055 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2056 endarg = find_end_of_inset(document.body, arg)
2059 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2060 if argbeginPlain == -1:
2061 document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
2063 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2064 marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2066 # remove Arg insets and paragraph, if it only contains this inset
2067 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2068 del document.body[arg - 1 : endarg + 4]
2070 del document.body[arg : endarg + 1]
# Choose the LaTeX command (`cmd` is assigned in lines omitted from this
# listing; \trigloss is used below for the 3-line variant).
2073 if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
# Replace the inset body by ERT: \cmd[opt]{line1}{line2}[{line3}].
2076 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2077 endInset = find_end_of_inset(document.body, i)
2078 endPlain = find_end_of_layout(document.body, beginPlain)
2079 precontent = put_cmd_in_ert(cmd)
2080 if len(optargcontent) > 0:
2081 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
2082 precontent += put_cmd_in_ert("{")
2084 postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
2085 if cmd == "\\trigloss":
2086 postcontent += put_cmd_in_ert("}{") + marg3content
2087 postcontent += put_cmd_in_ert("}")
# Splice: tail first, then insert head, then delete the inset opening,
# so earlier edits do not shift the later indices.
2089 document.body[endPlain:endInset + 1] = postcontent
2090 document.body[beginPlain + 1:beginPlain] = precontent
2091 del document.body[i : beginPlain + 1]
# The reverted commands come from the covington package.
2093 document.append_local_layout("Requires covington")
2098 def revert_subexarg(document):
2099 " Revert linguistic subexamples with argument to ERT "
# Subexample layouts exist only with the "linguistics" module.
2101 if not "linguistics" in document.get_module_list():
# NOTE(review): the loop scaffolding (`while True:`, `i = -1` style
# initialisation, break-on-not-found) falls in gaps of this listing.
2107 i = find_token(document.body, "\\begin_layout Subexample", i+1)
2110 j = find_end_of_layout(document.body, i)
2112 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2115 # check for consecutive layouts
2116 k = find_token(document.body, "\\begin_layout", j)
2117 if k == -1 or document.body[k] != "\\begin_layout Subexample":
2119 j = find_end_of_layout(document.body, k)
2121 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Extract the optional Argument 1 content (converted to LaTeX) that
# parameterises \begin{subexamples}[...].
2124 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2128 endarg = find_end_of_inset(document.body, arg)
2130 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2131 if argbeginPlain == -1:
2132 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2134 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2135 optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
2137 # remove Arg insets and paragraph, if it only contains this inset
2138 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2139 del document.body[arg - 1 : endarg + 4]
2141 del document.body[arg : endarg + 1]
2143 cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
2145 # re-find end of layout
2146 j = find_end_of_layout(document.body, i)
2148 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2151 # check for consecutive layouts
2152 k = find_token(document.body, "\\begin_layout", j)
2153 if k == -1 or document.body[k] != "\\begin_layout Subexample":
# Each consecutive Subexample paragraph becomes a Standard paragraph
# carrying an ERT \item.
2155 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2156 j = find_end_of_layout(document.body, k)
2158 document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Close the environment after the last consecutive Subexample, and open
# it (plus the first \item) in place of the first one.
2161 endev = put_cmd_in_ert("\\end{subexamples}")
2163 document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
2164 document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
2165 + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
# The subexamples environment is provided by covington.
2167 document.append_local_layout("Requires covington")
2171 def revert_drs(document):
2172 " Revert DRS insets (linguistics) to ERT "
# DRS (Discourse Representation Structure) insets exist only with the
# "linguistics" module.
2174 if not "linguistics" in document.get_module_list():
# All DRS inset flavours handled below.
2178 drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
2179 "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
2180 "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
2181 "\\begin_inset Flex SDRS"]
# NOTE(review): loop scaffolding (outer `for drs in drses:`, `while`,
# break checks) lies in gaps of this listing.
2185 i = find_token(document.body, drs, i+1)
2188 j = find_end_of_inset(document.body, i)
2190 document.warning("Malformed LyX document: Can't find end of DRS inset")
2193 # Check for arguments
# Pre-argument 1: extracted the same way as in revert_linggloss().
2194 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2195 endarg = find_end_of_inset(document.body, arg)
2198 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2199 if argbeginPlain == -1:
2200 document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
2202 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2203 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2205 # remove Arg insets and paragraph, if it only contains this inset
2206 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2207 del document.body[arg - 1 : endarg + 4]
2209 del document.body[arg : endarg + 1]
# Re-find the inset end after each deletion: indices have shifted.
2212 j = find_end_of_inset(document.body, i)
2214 document.warning("Malformed LyX document: Can't find end of DRS inset")
# Pre-argument 2 (used by SDRS below).
2217 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2218 endarg = find_end_of_inset(document.body, arg)
2221 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2222 if argbeginPlain == -1:
2223 document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
2225 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2226 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2228 # remove Arg insets and paragraph, if it only contains this inset
2229 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2230 del document.body[arg - 1 : endarg + 4]
2232 del document.body[arg : endarg + 1]
2235 j = find_end_of_inset(document.body, i)
2237 document.warning("Malformed LyX document: Can't find end of DRS inset")
# Post-arguments 1..4: trailing brace groups of the LaTeX command.
# Defaults to [] so the splice below is safe when the arg is absent.
2240 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2241 endarg = find_end_of_inset(document.body, arg)
2242 postarg1content = []
2244 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2245 if argbeginPlain == -1:
2246 document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
2248 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2249 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2251 # remove Arg insets and paragraph, if it only contains this inset
2252 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2253 del document.body[arg - 1 : endarg + 4]
2255 del document.body[arg : endarg + 1]
2258 j = find_end_of_inset(document.body, i)
2260 document.warning("Malformed LyX document: Can't find end of DRS inset")
2263 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2264 endarg = find_end_of_inset(document.body, arg)
2265 postarg2content = []
2267 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2268 if argbeginPlain == -1:
2269 document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
2271 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2272 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2274 # remove Arg insets and paragraph, if it only contains this inset
2275 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2276 del document.body[arg - 1 : endarg + 4]
2278 del document.body[arg : endarg + 1]
2281 j = find_end_of_inset(document.body, i)
2283 document.warning("Malformed LyX document: Can't find end of DRS inset")
2286 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2287 endarg = find_end_of_inset(document.body, arg)
2288 postarg3content = []
2290 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2291 if argbeginPlain == -1:
2292 document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
2294 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2295 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2297 # remove Arg insets and paragraph, if it only contains this inset
2298 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2299 del document.body[arg - 1 : endarg + 4]
2301 del document.body[arg : endarg + 1]
2304 j = find_end_of_inset(document.body, i)
2306 document.warning("Malformed LyX document: Can't find end of DRS inset")
2309 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
2310 endarg = find_end_of_inset(document.body, arg)
2311 postarg4content = []
2313 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2314 if argbeginPlain == -1:
2315 document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
2317 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2318 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
2320 # remove Arg insets and paragraph, if it only contains this inset
2321 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2322 del document.body[arg - 1 : endarg + 4]
2324 del document.body[arg : endarg + 1]
2326 # The respective LaTeX command
# `cmd` is selected per inset flavour; the assignment lines (e.g. the
# default \drs and each branch body) are omitted from this listing.
2328 if drs == "\\begin_inset Flex DRS*":
2330 elif drs == "\\begin_inset Flex IfThen-DRS":
2332 elif drs == "\\begin_inset Flex Cond-DRS":
2334 elif drs == "\\begin_inset Flex QDRS":
2336 elif drs == "\\begin_inset Flex NegDRS":
2338 elif drs == "\\begin_inset Flex SDRS":
# Rebuild \cmd{prearg1}[{prearg2}]{body}{postargs...} as ERT.
2341 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2342 endInset = find_end_of_inset(document.body, i)
2343 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
2344 precontent = put_cmd_in_ert(cmd)
2345 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
2346 if drs == "\\begin_inset Flex SDRS":
2347 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
2348 precontent += put_cmd_in_ert("{")
# Only some commands take trailing brace groups.
2351 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
2352 postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
2353 if cmd == "\\condrs" or cmd == "\\qdrs":
2354 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
2356 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
2358 postcontent = put_cmd_in_ert("}")
# Splice tail, then head, then drop the inset opening (order keeps the
# earlier indices valid).
2360 document.body[endPlain:endInset + 1] = postcontent
2361 document.body[beginPlain + 1:beginPlain] = precontent
2362 del document.body[i : beginPlain + 1]
# drs.sty loads covington itself, so mark covington as provided.
2364 document.append_local_layout("Provides covington 1")
2365 add_to_preamble(document, ["\\usepackage{drs,covington}"])
2371 def revert_babelfont(document):
2372 " Reverts the use of \\babelfont to user preamble "
# Only relevant for non-TeX (OpenType) fonts with the babel language
# package; otherwise \babelfont was never emitted.
2374 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2377 i = find_token(document.header, '\\language_package', 0)
2379 document.warning("Malformed LyX document: Missing \\language_package.")
2381 if get_value(document.header, "\\language_package", 0) != "babel":
2384 # check font settings
2386 roman = sans = typew = "default"
2388 sf_scale = tt_scale = 100.0
# For each family: read the font name, remember it, and reset the
# header entry to "default" (the real font moves to the preamble).
2390 j = find_token(document.header, "\\font_roman", 0)
2392 document.warning("Malformed LyX document: Missing \\font_roman.")
2394 # We need to use this regex since split() does not handle quote protection
2395 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2396 roman = romanfont[2].strip('"')
2397 romanfont[2] = '"default"'
2398 document.header[j] = " ".join(romanfont)
2400 j = find_token(document.header, "\\font_sans", 0)
2402 document.warning("Malformed LyX document: Missing \\font_sans.")
2404 # We need to use this regex since split() does not handle quote protection
2405 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2406 sans = sansfont[2].strip('"')
2407 sansfont[2] = '"default"'
2408 document.header[j] = " ".join(sansfont)
2410 j = find_token(document.header, "\\font_typewriter", 0)
2412 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2414 # We need to use this regex since split() does not handle quote protection
2415 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2416 typew = ttfont[2].strip('"')
2417 ttfont[2] = '"default"'
2418 document.header[j] = " ".join(ttfont)
# Old-style-figures flag and per-family scale factors.
2420 i = find_token(document.header, "\\font_osf", 0)
2422 document.warning("Malformed LyX document: Missing \\font_osf.")
2424 osf = str2bool(get_value(document.header, "\\font_osf", i))
2426 j = find_token(document.header, "\\font_sf_scale", 0)
2428 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2430 sfscale = document.header[j].split()
2433 document.header[j] = " ".join(sfscale)
2436 sf_scale = float(val)
2438 document.warning("Invalid font_sf_scale value: " + val)
2440 j = find_token(document.header, "\\font_tt_scale", 0)
2442 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
2444 ttscale = document.header[j].split()
2447 document.header[j] = " ".join(ttscale)
2450 tt_scale = float(val)
2452 document.warning("Invalid font_tt_scale value: " + val)
2454 # set preamble stuff
# Emit equivalent \babelfont preamble lines, wrapped in
# \AtBeginDocument so babel is loaded first.
2455 pretext = ['%% This document must be processed with xelatex or lualatex!']
2456 pretext.append('\\AtBeginDocument{%')
2457 if roman != "default":
2458 pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
2459 if sans != "default":
2460 sf = '\\babelfont{sf}['
2461 if sf_scale != 100.0:
2462 sf += 'Scale=' + str(sf_scale / 100.0) + ','
2463 sf += 'Mapping=tex-text]{' + sans + '}'
2465 if typew != "default":
2466 tw = '\\babelfont{tt}'
2467 if tt_scale != 100.0:
2468 tw += '[Scale=' + str(tt_scale / 100.0) + ']'
2469 tw += '{' + typew + '}'
2472 pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
2474 insert_to_preamble(document, pretext)
2477 def revert_minionpro(document):
2478 " Revert native MinionPro font definition (with extra options) to LaTeX "
# Only applies to TeX fonts; with non-TeX fonts MinionPro is not used
# this way.
2480 if get_bool_value(document.header, "\\use_non_tex_fonts"):
# Extra package options stored in \font_roman_opts (new in 2.4).
2483 regexp = re.compile(r'(\\font_roman_opts)')
2484 x = find_re(document.header, regexp, 0)
2488 # We need to use this regex since split() does not handle quote protection
2489 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2490 opts = romanopts[1].strip('"')
2492 i = find_token(document.header, "\\font_roman", 0)
2494 document.warning("Malformed LyX document: Missing \\font_roman.")
2497 # We need to use this regex since split() does not handle quote protection
2498 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2499 roman = romanfont[1].strip('"')
2500 if roman != "minionpro":
# Reset the header to default and rebuild \usepackage[...]{MinionPro}.
2502 romanfont[1] = '"default"'
2503 document.header[i] = " ".join(romanfont)
2505 j = find_token(document.header, "\\font_osf true", 0)
2508 preamble = "\\usepackage["
# OSF handling: MinionPro uses lining figures by default, so the flag is
# consumed here (branch bodies partly omitted from this listing).
2510 document.header[j] = "\\font_osf false"
2514 preamble += "]{MinionPro}"
2515 add_to_preamble(document, [preamble])
# Finally drop the now-reverted \font_roman_opts header line.
2516 del document.header[x]
2519 def revert_font_opts(document):
2520 " revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
# Applies to non-TeX fonts only; with babel, \babelfont is used instead
# of fontspec's \set*font commands.
2522 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
2523 Babel = (get_value(document.header, "\\language_package") == "babel")
2526 regexp = re.compile(r'(\\font_roman_opts)')
2527 i = find_re(document.header, regexp, 0)
2529 # We need to use this regex since split() does not handle quote protection
2530 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2531 opts = romanopts[1].strip('"')
# The opts header line is removed; its content moves to the preamble.
2532 del document.header[i]
2534 regexp = re.compile(r'(\\font_roman)')
2535 i = find_re(document.header, regexp, 0)
2537 # We need to use this regex since split() does not handle quote protection
2538 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2539 font = romanfont[2].strip('"')
2540 romanfont[2] = '"default"'
2541 document.header[i] = " ".join(romanfont)
2542 if font != "default":
2544 preamble = "\\babelfont{rm}["
2546 preamble = "\\setmainfont["
2549 preamble += "Mapping=tex-text]{"
2552 add_to_preamble(document, [preamble])
# Sans family: same pattern, plus the \font_sf_scale percentage which is
# translated into a fontspec Scale=0.xx option below.
2555 regexp = re.compile(r'(\\font_sans_opts)')
2556 i = find_re(document.header, regexp, 0)
2559 # We need to use this regex since split() does not handle quote protection
2560 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2561 opts = sfopts[1].strip('"')
2562 del document.header[i]
2564 regexp = re.compile(r'(\\font_sf_scale)')
2565 i = find_re(document.header, regexp, 0)
2567 scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
2568 regexp = re.compile(r'(\\font_sans)')
2569 i = find_re(document.header, regexp, 0)
2571 # We need to use this regex since split() does not handle quote protection
2572 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2573 font = sffont[2].strip('"')
2574 sffont[2] = '"default"'
2575 document.header[i] = " ".join(sffont)
2576 if font != "default":
2578 preamble = "\\babelfont{sf}["
2580 preamble = "\\setsansfont["
2584 preamble += "Scale=0."
2585 preamble += scaleval
2587 preamble += "Mapping=tex-text]{"
2590 add_to_preamble(document, [preamble])
# Typewriter family: identical scheme with \font_tt_scale.
2593 regexp = re.compile(r'(\\font_typewriter_opts)')
2594 i = find_re(document.header, regexp, 0)
2597 # We need to use this regex since split() does not handle quote protection
2598 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2599 opts = ttopts[1].strip('"')
2600 del document.header[i]
2602 regexp = re.compile(r'(\\font_tt_scale)')
2603 i = find_re(document.header, regexp, 0)
2605 scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
2606 regexp = re.compile(r'(\\font_typewriter)')
2607 i = find_re(document.header, regexp, 0)
2609 # We need to use this regex since split() does not handle quote protection
2610 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2611 font = ttfont[2].strip('"')
2612 ttfont[2] = '"default"'
2613 document.header[i] = " ".join(ttfont)
2614 if font != "default":
2616 preamble = "\\babelfont{tt}["
2618 preamble = "\\setmonofont["
2622 preamble += "Scale=0."
2623 preamble += scaleval
2625 preamble += "Mapping=tex-text]{"
2628 add_to_preamble(document, [preamble])
2631 def revert_plainNotoFonts_xopts(document):
2632 " Revert native (straight) Noto font definition (with extra options) to LaTeX "
# TeX-fonts only.
2634 if get_bool_value(document.header, "\\use_non_tex_fonts"):
# `osf` (old-style figures) is derived from this header flag; the
# assignment itself falls in a gap of this listing.
2638 y = find_token(document.header, "\\font_osf true", 0)
2642 regexp = re.compile(r'(\\font_roman_opts)')
2643 x = find_re(document.header, regexp, 0)
# Nothing to revert if there are neither extra options nor OSF.
2644 if x == -1 and not osf:
2649 # We need to use this regex since split() does not handle quote protection
2650 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2651 opts = romanopts[1].strip('"')
# This reversion only applies when ALL three families are the plain
# Noto set (noto.sty as a "complete font" package).
2657 i = find_token(document.header, "\\font_roman", 0)
2661 # We need to use this regex since split() does not handle quote protection
2662 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2663 roman = romanfont[1].strip('"')
2664 if roman != "NotoSerif-TLF":
2667 j = find_token(document.header, "\\font_sans", 0)
2671 # We need to use this regex since split() does not handle quote protection
2672 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2673 sf = sffont[1].strip('"')
2677 j = find_token(document.header, "\\font_typewriter", 0)
2681 # We need to use this regex since split() does not handle quote protection
2682 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2683 tt = ttfont[1].strip('"')
2687 # So we have noto as "complete font"
2688 romanfont[1] = '"default"'
2689 document.header[i] = " ".join(romanfont)
2691 preamble = "\\usepackage["
2693 preamble += "]{noto}"
2694 add_to_preamble(document, [preamble])
# Consume the OSF flag and the now-reverted opts header line.
2696 document.header[y] = "\\font_osf false"
2698 del document.header[x]
2701 def revert_notoFonts_xopts(document):
2702 " Revert native (extended) Noto font definition (with extra options) to LaTeX "
# TeX-fonts only.  `fontmap` is presumably initialised in an omitted
# line; createFontMapping/revert_fonts are defined elsewhere in the file.
2704 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2708 fm = createFontMapping(['Noto'])
2709 if revert_fonts(document, fm, fontmap, True):
2710 add_preamble_fonts(document, fontmap)
2713 def revert_IBMFonts_xopts(document):
2714 " Revert native IBM font definition (with extra options) to LaTeX "
# Same pattern as revert_notoFonts_xopts, for the IBM Plex families.
2716 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2720 fm = createFontMapping(['IBM'])
2721 if revert_fonts(document, fm, fontmap, True):
2722 add_preamble_fonts(document, fontmap)
2725 def revert_AdobeFonts_xopts(document):
2726 " Revert native Adobe font definition (with extra options) to LaTeX "
# Same pattern as revert_notoFonts_xopts, for the Adobe Source families.
2728 if get_bool_value(document.header, "\\use_non_tex_fonts"):
2732 fm = createFontMapping(['Adobe'])
2733 if revert_fonts(document, fm, fontmap, True):
2734 add_preamble_fonts(document, fontmap)
2737 def convert_osf(document):
2738 " Convert \\font_osf param to new format "
# 2.4 splits the single \font_osf flag into per-family flags
# (\font_roman_osf, \font_sans_osf, \font_typewriter_osf).
2740 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
2742 i = find_token(document.header, '\\font_osf', 0)
2744 document.warning("Malformed LyX document: Missing \\font_osf.")
# TeX fonts whose sans/typewriter variants already carry OSF.
2747 osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
2748 osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]
2750 osfval = str2bool(get_value(document.header, "\\font_osf", i))
2751 document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
# Non-TeX fonts: OSF is a roman-only feature; sans/tt get "false".
# (Branch structure partly omitted from this listing.)
2754 document.header.insert(i, "\\font_sans_osf false")
2755 document.header.insert(i + 1, "\\font_typewriter_osf false")
# TeX fonts with the flag set: propagate OSF to sans/tt only when the
# selected font is one of the known OSF variants above.
2759 x = find_token(document.header, "\\font_sans", 0)
2761 document.warning("Malformed LyX document: Missing \\font_sans.")
2763 # We need to use this regex since split() does not handle quote protection
2764 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2765 sf = sffont[1].strip('"')
2767 document.header.insert(i, "\\font_sans_osf true")
2769 document.header.insert(i, "\\font_sans_osf false")
2771 x = find_token(document.header, "\\font_typewriter", 0)
2773 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2775 # We need to use this regex since split() does not handle quote protection
2776 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2777 tt = ttfont[1].strip('"')
2779 document.header.insert(i + 1, "\\font_typewriter_osf true")
2781 document.header.insert(i + 1, "\\font_typewriter_osf false")
# Fallback path (flag unset): both new flags default to false.
2784 document.header.insert(i, "\\font_sans_osf false")
2785 document.header.insert(i + 1, "\\font_typewriter_osf false")
2788 def revert_osf(document):
2789 " Revert \\font_*_osf params "
# Collapse the three per-family OSF flags back into the single pre-2.4
# \font_osf flag (true if any family had OSF, judging by the |= below).
2791 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
2793 i = find_token(document.header, '\\font_roman_osf', 0)
2795 document.warning("Malformed LyX document: Missing \\font_roman_osf.")
2798 osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
2799 document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
2801 i = find_token(document.header, '\\font_sans_osf', 0)
2803 document.warning("Malformed LyX document: Missing \\font_sans_osf.")
2806 osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
2807 del document.header[i]
2809 i = find_token(document.header, '\\font_typewriter_osf', 0)
2811 document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
2814 osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
2815 del document.header[i]
# If any family requested OSF, force the legacy flag to true.
# (The guard on `osfval` sits in a gap of this listing.)
2818 i = find_token(document.header, '\\font_osf', 0)
2820 document.warning("Malformed LyX document: Missing \\font_osf.")
2822 document.header[i] = "\\font_osf true"
2825 def revert_texfontopts(document):
2826 " Revert native TeX font definitions (with extra options) to LaTeX "
# TeX-fonts only.
2828 if get_bool_value(document.header, "\\use_non_tex_fonts"):
# Roman TeX fonts that support extra package options.
2831 rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]
2833 # First the sf (biolinum only)
2834 regexp = re.compile(r'(\\font_sans_opts)')
2835 x = find_re(document.header, regexp, 0)
2837 # We need to use this regex since split() does not handle quote protection
2838 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2839 opts = sfopts[1].strip('"')
2840 i = find_token(document.header, "\\font_sans", 0)
2842 document.warning("Malformed LyX document: Missing \\font_sans.")
2844 # We need to use this regex since split() does not handle quote protection
2845 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2846 sans = sffont[1].strip('"')
2847 if sans == "biolinum":
# Reset header to default; rebuild \usepackage[osf?,scaled?,opts]{biolinum}.
2849 sffont[1] = '"default"'
2850 document.header[i] = " ".join(sffont)
2852 j = find_token(document.header, "\\font_sans_osf true", 0)
2855 k = find_token(document.header, "\\font_sf_scale", 0)
2857 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2859 sfscale = document.header[k].split()
2862 document.header[k] = " ".join(sfscale)
2865 sf_scale = float(val)
2867 document.warning("Invalid font_sf_scale value: " + val)
2868 preamble = "\\usepackage["
2870 document.header[j] = "\\font_sans_osf false"
2872 if sf_scale != 100.0:
2873 preamble += 'scaled=' + str(sf_scale / 100.0) + ','
2875 preamble += "]{biolinum}"
2876 add_to_preamble(document, [preamble])
2877 del document.header[x]
# Now the roman fonts with options.
2879 regexp = re.compile(r'(\\font_roman_opts)')
2880 x = find_re(document.header, regexp, 0)
2884 # We need to use this regex since split() does not handle quote protection
2885 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2886 opts = romanopts[1].strip('"')
2888 i = find_token(document.header, "\\font_roman", 0)
2890 document.warning("Malformed LyX document: Missing \\font_roman.")
2893 # We need to use this regex since split() does not handle quote protection
2894 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2895 roman = romanfont[1].strip('"')
2896 if not roman in rmfonts:
2898 romanfont[1] = '"default"'
2899 document.header[i] = " ".join(romanfont)
# LyX font name -> LaTeX package name (default presumably the name
# itself; that assignment falls in a gap of this listing).
2901 if roman == "utopia":
2903 elif roman == "palatino":
2904 package = "mathpazo"
2905 elif roman == "times":
2906 package = "mathptmx"
2907 elif roman == "xcharter":
2908 package = "XCharter"
# Translate the OSF flag into the per-package option spelling.
2910 j = find_token(document.header, "\\font_roman_osf true", 0)
2912 if roman == "cochineal":
2913 osf = "proportional,osf,"
2914 elif roman == "utopia":
2916 elif roman == "garamondx":
2918 elif roman == "libertine":
2920 elif roman == "palatino":
2922 elif roman == "xcharter":
2924 document.header[j] = "\\font_roman_osf false"
# Small caps (\font_sc) is only an option for some packages.
2925 k = find_token(document.header, "\\font_sc true", 0)
2927 if roman == "utopia":
2929 if roman == "palatino" and osf == "":
2931 document.header[k] = "\\font_sc false"
2932 preamble = "\\usepackage["
2935 preamble += "]{" + package + "}"
2936 add_to_preamble(document, [preamble])
2937 del document.header[x]
2940 def convert_CantarellFont(document):
2941 " Handle Cantarell font definition to LaTeX "
# TeX-fonts only; convert_fonts/createFontMapping are defined elsewhere
# in this file.  "oldstyle" selects the OSF variant handling.
2943 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2944 fm = createFontMapping(['Cantarell'])
2945 convert_fonts(document, fm, "oldstyle")
2947 def revert_CantarellFont(document):
2948 " Revert native Cantarell font definition to LaTeX "
# TeX-fonts only.  `fontmap` is presumably initialised in an omitted
# line; collected packages go to the preamble on success.
2950 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2952 fm = createFontMapping(['Cantarell'])
2953 if revert_fonts(document, fm, fontmap, False, True):
2954 add_preamble_fonts(document, fontmap)
2956 def convert_ChivoFont(document):
2957 " Handle Chivo font definition to LaTeX "
# Same scheme as convert_CantarellFont, for the Chivo family.
2959 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2960 fm = createFontMapping(['Chivo'])
2961 convert_fonts(document, fm, "oldstyle")
2963 def revert_ChivoFont(document):
2964 " Revert native Chivo font definition to LaTeX "
# Same scheme as revert_CantarellFont, for the Chivo family.
2966 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2968 fm = createFontMapping(['Chivo'])
2969 if revert_fonts(document, fm, fontmap, False, True):
2970 add_preamble_fonts(document, fontmap)
2973 def convert_FiraFont(document):
2974 " Handle Fira font definition to LaTeX "
# Same scheme as convert_CantarellFont; "lf" selects the lining-figures
# variant handling for Fira.
2976 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2977 fm = createFontMapping(['Fira'])
2978 convert_fonts(document, fm, "lf")
2980 def revert_FiraFont(document):
2981 " Revert native Fira font definition to LaTeX "
# Same scheme as revert_CantarellFont, for the Fira family.
2983 if not get_bool_value(document.header, "\\use_non_tex_fonts"):
2985 fm = createFontMapping(['Fira'])
2986 if revert_fonts(document, fm, fontmap, False, True):
2987 add_preamble_fonts(document, fontmap)
2990 def convert_Semibolds(document):
2991 " Move semibold options to extraopts "
# NOTE(review): this extract elides the `if i == -1:` / `return` / `else:`
# guard lines that accompany the warnings below — confirm against full file.
2993 NonTeXFonts = get_bool_value(document.header, "\\use_non_tex_fonts")
# --- roman: IBMPlexSerifSemibold -> IBMPlexSerif + "semibold" option ---
2995 i = find_token(document.header, "\\font_roman", 0)
2997 document.warning("Malformed LyX document: Missing \\font_roman.")
2999 # We need to use this regex since split() does not handle quote protection
3000 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3001 roman = romanfont[1].strip('"')
3002 if roman == "IBMPlexSerifSemibold":
3003 romanfont[1] = '"IBMPlexSerif"'
3004 document.header[i] = " ".join(romanfont)
# With TeX fonts, record the dropped "semibold" as a \font_roman_opts value
# (creating the tag if absent, else prepending to the existing options).
3006 if NonTeXFonts == False:
3007 regexp = re.compile(r'(\\font_roman_opts)')
3008 x = find_re(document.header, regexp, 0)
3010 # Sensible place to insert tag
3011 fo = find_token(document.header, "\\font_sf_scale")
3013 document.warning("Malformed LyX document! Missing \\font_sf_scale")
3015 document.header.insert(fo, "\\font_roman_opts \"semibold\"")
3017 # We need to use this regex since split() does not handle quote protection
3018 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3019 document.header[x] = "\\font_roman_opts \"semibold, " + romanopts[1].strip('"') + "\""
# --- sans: IBMPlexSansSemibold -> IBMPlexSans + "semibold" option ---
3021 i = find_token(document.header, "\\font_sans", 0)
3023 document.warning("Malformed LyX document: Missing \\font_sans.")
3025 # We need to use this regex since split() does not handle quote protection
3026 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3027 sf = sffont[1].strip('"')
3028 if sf == "IBMPlexSansSemibold":
3029 sffont[1] = '"IBMPlexSans"'
3030 document.header[i] = " ".join(sffont)
3032 if NonTeXFonts == False:
3033 regexp = re.compile(r'(\\font_sans_opts)')
3034 x = find_re(document.header, regexp, 0)
3036 # Sensible place to insert tag
3037 fo = find_token(document.header, "\\font_sf_scale")
3039 document.warning("Malformed LyX document! Missing \\font_sf_scale")
3041 document.header.insert(fo, "\\font_sans_opts \"semibold\"")
3043 # We need to use this regex since split() does not handle quote protection
3044 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3045 document.header[x] = "\\font_sans_opts \"semibold, " + sfopts[1].strip('"') + "\""
# --- typewriter: IBMPlexMonoSemibold -> IBMPlexMono + "semibold" option ---
3047 i = find_token(document.header, "\\font_typewriter", 0)
3049 document.warning("Malformed LyX document: Missing \\font_typewriter.")
3051 # We need to use this regex since split() does not handle quote protection
3052 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3053 tt = ttfont[1].strip('"')
3054 if tt == "IBMPlexMonoSemibold":
3055 ttfont[1] = '"IBMPlexMono"'
3056 document.header[i] = " ".join(ttfont)
3058 if NonTeXFonts == False:
3059 regexp = re.compile(r'(\\font_typewriter_opts)')
3060 x = find_re(document.header, regexp, 0)
3062 # Sensible place to insert tag
3063 fo = find_token(document.header, "\\font_tt_scale")
3065 document.warning("Malformed LyX document! Missing \\font_tt_scale")
3067 document.header.insert(fo, "\\font_typewriter_opts \"semibold\"")
3069 # We need to use this regex since split() does not handle quote protection
3070 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
3071 document.header[x] = "\\font_typewriter_opts \"semibold, " + ttopts[1].strip('"') + "\""
3074 def convert_NotoRegulars(document):
3075 " Merge diverse Noto regular fonts "
# Renames the -TLF variants of Noto serif/sans/mono in the document header
# to the unified *Regular names.
# NOTE(review): `if i == -1:` guard lines around the warnings appear elided
# in this extract — confirm against the full file.
3077 i = find_token(document.header, "\\font_roman", 0)
3079 document.warning("Malformed LyX document: Missing \\font_roman.")
3081 # We need to use this regex since split() does not handle quote protection
3082 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3083 roman = romanfont[1].strip('"')
3084 if roman == "NotoSerif-TLF":
3085 romanfont[1] = '"NotoSerifRegular"'
3086 document.header[i] = " ".join(romanfont)
3088 i = find_token(document.header, "\\font_sans", 0)
3090 document.warning("Malformed LyX document: Missing \\font_sans.")
3092 # We need to use this regex since split() does not handle quote protection
3093 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3094 sf = sffont[1].strip('"')
3095 if sf == "NotoSans-TLF":
3096 sffont[1] = '"NotoSansRegular"'
3097 document.header[i] = " ".join(sffont)
3099 i = find_token(document.header, "\\font_typewriter", 0)
3101 document.warning("Malformed LyX document: Missing \\font_typewriter.")
3103 # We need to use this regex since split() does not handle quote protection
3104 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
3105 tt = ttfont[1].strip('"')
3106 if tt == "NotoMono-TLF":
3107 ttfont[1] = '"NotoMonoRegular"'
3108 document.header[i] = " ".join(ttfont)
def convert_CrimsonProFont(document):
    " Handle CrimsonPro font definition to LaTeX "

    # Skip documents that use non-TeX (system) fonts.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(['CrimsonPro'])
    convert_fonts(document, mapping, "lf")
def revert_CrimsonProFont(document):
    " Revert native CrimsonPro font definition to LaTeX "

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Initialize the package -> [options] map before revert_fonts()
        # fills it; the visible code used it without assignment.
        fontmap = dict()
        fm = createFontMapping(['CrimsonPro'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
3128 def revert_pagesizes(document):
3129 " Revert new page sizes in memoir and KOMA to options "
# NOTE(review): textclass[:2] is a two-char slice and can never equal "scr";
# the sibling convert_pagesizes uses [:3] — likely an off-by-one slice here.
3131 if document.textclass != "memoir" and document.textclass[:2] != "scr":
# Geometry overrides paper size, so nothing to revert in that case.
3134 i = find_token(document.header, "\\use_geometry true", 0)
# Sizes LyX 2.3 already knows; only other values need the class option.
3138 defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]
3140 i = find_token(document.header, "\\papersize", 0)
3142 document.warning("Malformed LyX document! Missing \\papersize header.")
3144 val = get_value(document.header, "\\papersize", i)
3149 document.header[i] = "\\papersize default"
# Move the size into \options, appending if \options already exists,
# otherwise inserting a fresh \options line before \textclass.
3151 i = find_token(document.header, "\\options", 0)
3153 i = find_token(document.header, "\\textclass", 0)
3155 document.warning("Malformed LyX document! Missing \\textclass header.")
3157 document.header.insert(i, "\\options " + val)
3159 document.header[i] = document.header[i] + "," + val
3162 def convert_pagesizes(document):
3163 " Convert to new page sizes in memoir and KOMA to options "
3165 if document.textclass != "memoir" and document.textclass[:3] != "scr":
# Already using geometry: nothing to convert.
3168 i = find_token(document.header, "\\use_geometry true", 0)
3172 defsizes = ["default", "custom", "letterpaper", "legalpaper", "executivepaper", "a4paper", "a5paper", "b5paper"]
3174 i = find_token(document.header, "\\papersize", 0)
3176 document.warning("Malformed LyX document! Missing \\papersize header.")
3178 val = get_value(document.header, "\\papersize", i)
3183 i = find_token(document.header, "\\use_geometry false", 0)
3185 # Maintain use of geometry
# NOTE(review): `document.header[1]` indexes a fixed header line; this
# presumably should be `document.header[i]` (the \use_geometry line found
# just above) — confirm against the full file.
3186 document.header[1] = "\\use_geometry true"
3188 def revert_komafontsizes(document):
3189 " Revert new font sizes in KOMA to options "
# Only KOMA-script classes (prefix "scr") support fontsize=<val> options.
3191 if document.textclass[:3] != "scr":
3194 i = find_token(document.header, "\\paperfontsize", 0)
3196 document.warning("Malformed LyX document! Missing \\paperfontsize header.")
# Sizes LyX 2.3 supports natively; anything else becomes a class option.
3199 defsizes = ["default", "10", "11", "12"]
3201 val = get_value(document.header, "\\paperfontsize", i)
3206 document.header[i] = "\\paperfontsize default"
3208 fsize = "fontsize=" + val
# Append to an existing \options line, or insert one before \textclass.
3210 i = find_token(document.header, "\\options", 0)
3212 i = find_token(document.header, "\\textclass", 0)
3214 document.warning("Malformed LyX document! Missing \\textclass header.")
3216 document.header.insert(i, "\\options " + fsize)
3218 document.header[i] = document.header[i] + "," + fsize
3221 def revert_dupqualicites(document):
3222 " Revert qualified citation list commands with duplicate keys to ERT "
3224 # LyX 2.3 only supports qualified citation lists with unique keys. Thus,
3225 # we need to revert those with multiple uses of the same key.
# Only the biblatex engines produce qualified-list commands.
3229 i = find_token(document.header, "\\cite_engine", 0)
3231 document.warning("Malformed document! Missing \\cite_engine")
3233 engine = get_value(document.header, "\\cite_engine", i)
3235 if not engine in ["biblatex", "biblatex-natbib"]:
3238 # Citation insets that support qualified lists, with their LaTeX code
3242 "citet" : "textcites",
3243 "Citet" : "Textcites",
3244 "citep" : "parencites",
3245 "Citep" : "Parencites",
3246 "Footcite" : "Smartcites",
3247 "footcite" : "smartcites",
3248 "Autocite" : "Autocites",
3249 "autocite" : "autocites",
# Scan every citation inset in the body.
3254 i = find_token(document.body, "\\begin_inset CommandInset citation", i)
3257 j = find_end_of_inset(document.body, i)
3259 document.warning("Can't find end of citation inset at line %d!!" %(i))
3263 k = find_token(document.body, "LatexCommand", i, j)
3265 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
3269 cmd = get_value(document.body, "LatexCommand", k)
3270 if not cmd in list(ql_citations.keys()):
# A qualified list is present only if pre- or post-texts exist.
3274 pres = find_token(document.body, "pretextlist", i, j)
3275 posts = find_token(document.body, "posttextlist", i, j)
3276 if pres == -1 and posts == -1:
3281 key = get_quoted_value(document.body, "key", i, j)
3283 document.warning("Citation inset at line %d does not have a key!" %(i))
# Only duplicate keys need reverting; unique-key lists survive in 2.3.
3287 keys = key.split(",")
3288 ukeys = list(set(keys))
3289 if len(keys) == len(ukeys):
3294 pretexts = get_quoted_value(document.body, "pretextlist", pres)
3295 posttexts = get_quoted_value(document.body, "posttextlist", posts)
3297 pre = get_quoted_value(document.body, "before", i, j)
3298 post = get_quoted_value(document.body, "after", i, j)
# Build per-key multimaps of pre-/post-texts (tab-separated, keyed on
# the first word of each "key value" entry).
3299 prelist = pretexts.split("\t")
3302 ppp = pp.split(" ", 1)
3308 if ppp[0] in premap:
3309 premap[ppp[0]] = premap[ppp[0]] + "\t" + val
3311 premap[ppp[0]] = val
3312 postlist = posttexts.split("\t")
3315 ppp = pp.split(" ", 1)
3321 if ppp[0] in postmap:
3322 postmap[ppp[0]] = postmap[ppp[0]] + "\t" + val
3324 postmap[ppp[0]] = val
3325 # Replace known new commands with ERT
# Parenthesized pre/post strings must be braced, or biblatex would
# misparse them as the () delimiters of the command.
3326 if "(" in pre or ")" in pre:
3327 pre = "{" + pre + "}"
3328 if "(" in post or ")" in post:
3329 post = "{" + post + "}"
3330 res = "\\" + ql_citations[cmd]
3332 res += "(" + pre + ")"
3334 res += "(" + post + ")"
# Emit [pre][post]{key} per key, consuming one stored text per use.
3338 if premap.get(kk, "") != "":
3339 akeys = premap[kk].split("\t", 1)
3342 res += "[" + akey + "]"
3344 premap[kk] = "\t".join(akeys[1:])
3347 if postmap.get(kk, "") != "":
3348 akeys = postmap[kk].split("\t", 1)
3351 res += "[" + akey + "]"
3353 postmap[kk] = "\t".join(akeys[1:])
3356 elif premap.get(kk, "") != "":
3358 res += "{" + kk + "}"
3359 document.body[i:j+1] = put_cmd_in_ert([res])
3362 def convert_pagesizenames(document):
3363 " Convert LyX page sizes names "
# Drops the "paper" suffix from the \papersize header value, e.g.
# "a4paper" -> "a4". Letter/legal/executive are shortened the same way.
3365 i = find_token(document.header, "\\papersize", 0)
3367 document.warning("Malformed LyX document! Missing \\papersize header.")
3369 oldnames = ["letterpaper", "legalpaper", "executivepaper", \
3370 "a0paper", "a1paper", "a2paper", "a3paper", "a4paper", "a5paper", "a6paper", \
3371 "b0paper", "b1paper", "b2paper", "b3paper", "b4paper", "b5paper", "b6paper", \
3372 "c0paper", "c1paper", "c2paper", "c3paper", "c4paper", "c5paper", "c6paper"]
3373 val = get_value(document.header, "\\papersize", i)
3375 newval = val.replace("paper", "")
3376 document.header[i] = "\\papersize " + newval
3378 def revert_pagesizenames(document):
3379 " Revert LyX page sizes names "
# Inverse of convert_pagesizenames: re-append the "paper" suffix to the
# short size names, e.g. "a4" -> "a4paper".
3381 i = find_token(document.header, "\\papersize", 0)
3383 document.warning("Malformed LyX document! Missing \\papersize header.")
3385 newnames = ["letter", "legal", "executive", \
3386 "a0", "a1", "a2", "a3", "a4", "a5", "a6", \
3387 "b0", "b1", "b2", "b3", "b4", "b5", "b6", \
3388 "c0", "c1", "c2", "c3", "c4", "c5", "c6"]
3389 val = get_value(document.header, "\\papersize", i)
3391 newval = val + "paper"
3392 document.header[i] = "\\papersize " + newval
3395 def revert_theendnotes(document):
3396 " Reverts native support of \\theendnotes to TeX-code "
# Only relevant when one of the endnotes modules is active.
3398 if not "endnotes" in document.get_module_list() and not "foottoend" in document.get_module_list():
# Replace every endnote FloatList inset with raw \theendnotes.
3403 i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
3406 j = find_end_of_inset(document.body, i)
3408 document.warning("Malformed LyX document: Can't find end of FloatList inset")
3411 document.body[i : j + 1] = put_cmd_in_ert("\\theendnotes")
3414 def revert_enotez(document):
3415 " Reverts native support of enotez package to TeX-code "
3417 if not "enotez" in document.get_module_list() and not "foottoenotez" in document.get_module_list():
# Track whether anything enotez-specific was actually used, so the
# package is only added to the preamble when needed.
3421 if find_token(document.body, "\\begin_inset Flex Endnote", 0) != -1:
3424 revert_flex_inset(document.body, "Endnote", "\\endnote")
# Replace endnote FloatList insets with raw \printendnotes.
3428 i = find_token(document.body, "\\begin_inset FloatList endnote", i + 1)
3431 j = find_end_of_inset(document.body, i)
3433 document.warning("Malformed LyX document: Can't find end of FloatList inset")
3437 document.body[i : j + 1] = put_cmd_in_ert("\\printendnotes")
3440 add_to_preamble(document, ["\\usepackage{enotez}"])
# Remove the modules in any case; the feature is now emulated in TeX.
3441 document.del_module("enotez")
3442 document.del_module("foottoenotez")
3445 def revert_memoir_endnotes(document):
3446 " Reverts native support of memoir endnotes to TeX-code "
3448 if document.textclass != "memoir":
# memoir's own command is \pagenote; if an endnotes module is also
# loaded, its \endnote takes precedence for the Flex inset.
3451 encommand = "\\pagenote"
3452 modules = document.get_module_list()
3453 if "enotez" in modules or "foottoenotez" in modules or "endnotes" in modules or "foottoend" in modules:
3454 encommand = "\\endnote"
3456 revert_flex_inset(document.body, "Endnote", encommand)
# Replace pagenote FloatList insets with \printpagenotes[*].
3460 i = find_token(document.body, "\\begin_inset FloatList pagenote", i + 1)
3463 j = find_end_of_inset(document.body, i)
3465 document.warning("Malformed LyX document: Can't find end of FloatList inset")
# The starred inset variant maps to the starred TeX command.
3468 if document.body[i] == "\\begin_inset FloatList pagenote*":
3469 document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes*")
3471 document.body[i : j + 1] = put_cmd_in_ert("\\printpagenotes")
3472 add_to_preamble(document, ["\\makepagenote"])
3475 def revert_totalheight(document):
3476 " Reverts graphics height parameter from totalheight to height "
# Map of relative LaTeX lengths back to LyX percent units.
3478 relative_heights = {
3479 "\\textwidth" : "text%",
3480 "\\columnwidth" : "col%",
3481 "\\paperwidth" : "page%",
3482 "\\linewidth" : "line%",
3483 "\\textheight" : "theight%",
3484 "\\paperheight" : "pheight%",
3485 "\\baselineskip " : "baselineskip%"
# Walk all Graphics insets.
3489 i = find_token(document.body, "\\begin_inset Graphics", i)
3492 j = find_end_of_inset(document.body, i)
3494 document.warning("Can't find end of graphics inset at line %d!!" %(i))
# rx matches a "special" parameter line, rxx splits "<number><unit>".
3498 rx = re.compile(r'\s*special\s*(\S+)$')
3499 rxx = re.compile(r'(\d*\.*\d+)(\S+)$')
3500 k = find_re(document.body, rx, i, j)
3504 m = rx.match(document.body[k])
3506 special = m.group(1)
3507 mspecial = special.split(',')
# Extract any height= entry from the comma-separated special list
# (NOTE(review): mspecial is mutated while iterated; appears to rely
# on at most one height= entry being present).
3508 for spc in mspecial:
3509 if spc.startswith("height="):
3510 oldheight = spc.split('=')[1]
3511 ms = rxx.search(oldheight)
3513 oldunit = ms.group(2)
3514 if oldunit in list(relative_heights.keys()):
3515 oldval = str(float(ms.group(1)) * 100)
3516 oldunit = relative_heights[oldunit]
3517 oldheight = oldval + oldunit
3518 mspecial.remove(spc)
3520 if len(mspecial) > 0:
3521 special = ",".join(mspecial)
# Move the plain height parameter into special=totalheight, restoring
# the extracted height= value as the inset's height where needed.
3525 rx = re.compile(r'(\s*height\s*)(\S+)$')
3526 kk = find_re(document.body, rx, i, j)
3528 m = rx.match(document.body[kk])
3534 val = val + "," + special
3535 document.body[k] = "\tspecial " + "totalheight=" + val
3537 document.body.insert(kk, "\tspecial totalheight=" + val)
3539 document.body[kk] = m.group(1) + oldheight
3541 del document.body[kk]
3544 document.body[k] = "\tspecial " + special
3545 document.body.insert(k, "\theight " + oldheight)
3547 document.body[k] = "\theight " + oldheight
3551 def convert_totalheight(document):
3552 " Converts graphics height parameter from totalheight to height "
# Map of LyX percent units to the corresponding relative LaTeX lengths.
3554 relative_heights = {
3555 "text%" : "\\textwidth",
3556 "col%" : "\\columnwidth",
3557 "page%" : "\\paperwidth",
3558 "line%" : "\\linewidth",
3559 "theight%" : "\\textheight",
3560 "pheight%" : "\\paperheight",
3561 "baselineskip%" : "\\baselineskip"
# Walk all Graphics insets.
3565 i = find_token(document.body, "\\begin_inset Graphics", i)
3568 j = find_end_of_inset(document.body, i)
3570 document.warning("Can't find end of graphics inset at line %d!!" %(i))
3574 rx = re.compile(r'\s*special\s*(\S+)$')
3575 k = find_re(document.body, rx, i, j)
3579 m = rx.match(document.body[k])
3581 special = m.group(1)
3582 mspecial = special.split(',')
# Pull any totalheight= entry out of the special list
# (NOTE(review): list mutated during iteration; assumes one entry).
3583 for spc in mspecial:
3584 if spc[:12] == "totalheight=":
3585 newheight = spc.split('=')[1]
3586 mspecial.remove(spc)
3588 if len(mspecial) > 0:
3589 special = ",".join(mspecial)
# Convert an existing height parameter into special=height and install
# the totalheight value as the inset's height.
3593 rx = re.compile(r'(\s*height\s*)(\d+\.?\d*)(\S+)$')
3594 kk = find_re(document.body, rx, i, j)
3596 m = rx.match(document.body[kk])
# Percent units become fractional multiples of the LaTeX length.
3601 if unit in list(relative_heights.keys()):
3602 val = str(float(val) / 100)
3603 unit = relative_heights[unit]
3606 val = val + unit + "," + special
3607 document.body[k] = "\tspecial " + "height=" + val
3609 document.body.insert(kk + 1, "\tspecial height=" + val + unit)
3611 document.body[kk] = m.group(1) + newheight
3613 del document.body[kk]
3615 document.body.insert(k, "\theight " + newheight)
3619 def convert_changebars(document):
3620 " Converts the changebars module to native solution "
3622 if not "changebars" in document.get_module_list():
# Insert \change_bars before \output_changes; if the anchor is missing
# the module is still removed, since the feature is native now.
3625 i = find_token(document.header, "\\output_changes", 0)
3627 document.warning("Malformed LyX document! Missing \\output_changes header.")
3628 document.del_module("changebars")
3631 document.header.insert(i, "\\change_bars true")
3632 document.del_module("changebars")
3635 def revert_changebars(document):
3636 " Reverts native changebar param to the changebars module "
3638 i = find_token(document.header, "\\change_bars", 0)
3640 document.warning("Malformed LyX document! Missing \\change_bars header.")
3643 val = get_value(document.header, "\\change_bars", i)
# Only re-add the module when change bars were actually enabled.
3646 document.add_module("changebars")
# The header parameter is removed in either case (unknown to LyX 2.3).
3648 del document.header[i]
def convert_postpone_fragile(document):
    " Adds false \\postpone_fragile_content buffer param "

    i = find_token(document.header, "\\output_changes", 0)
    if i == -1:
        # Without the anchor line we must not insert at index -1 (that
        # would land before the last header line); warn and bail out.
        document.warning("Malformed LyX document! Missing \\output_changes header.")
        return
    # Set this to false for old documents (see #2154)
    document.header.insert(i, "\\postpone_fragile_content false")
def revert_postpone_fragile(document):
    " Remove \\postpone_fragile_content buffer param "

    i = find_token(document.header, "\\postpone_fragile_content", 0)
    if i == -1:
        # Guard: `del header[-1]` would silently drop an unrelated
        # header line when the parameter is absent.
        document.warning("Malformed LyX document! Missing \\postpone_fragile_content.")
        return
    del document.header[i]
3673 def revert_colrow_tracking(document):
3674 " Remove change tag from tabular columns/rows "
# Walk all Tabular insets and strip change="..." attributes from
# <column ...> and <row ...> tags, which LyX 2.3 does not understand.
3677 i = find_token(document.body, "\\begin_inset Tabular", i+1)
3680 j = find_end_of_inset(document.body, i+1)
3682 document.warning("Malformed LyX document: Could not find end of tabular.")
3684 for k in range(i, j):
3685 m = re.search('^<column.*change="([^"]+)".*>$', document.body[k])
3687 document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', '')
3688 m = re.search('^<row.*change="([^"]+)".*>$', document.body[k])
3690 document.body[k] = document.body[k].replace(' change="' + m.group(1) + '"', '')
3693 def convert_counter_maintenance(document):
3694 " Convert \\maintain_unincluded_children buffer param from boolean value to tristate "
3696 i = find_token(document.header, "\\maintain_unincluded_children", 0)
3698 document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
3701 val = get_value(document.header, "\\maintain_unincluded_children", i)
# true -> strict, anything else -> no.
3704 document.header[i] = "\\maintain_unincluded_children strict"
3706 document.header[i] = "\\maintain_unincluded_children no"
3709 def revert_counter_maintenance(document):
3710 " Revert \\maintain_unincluded_children buffer param to previous boolean value "
3712 i = find_token(document.header, "\\maintain_unincluded_children", 0)
3714 document.warning("Malformed LyX document! Missing \\maintain_unincluded_children.")
3717 val = get_value(document.header, "\\maintain_unincluded_children", i)
# "no" -> false; "strict" (and any other tristate value) -> true.
3720 document.header[i] = "\\maintain_unincluded_children false"
3722 document.header[i] = "\\maintain_unincluded_children true"
3725 def revert_counter_inset(document):
3726 " Revert counter inset to ERT, where possible"
# Counters whose value must be saved/restored need a LyXSave<counter>
# companion counter declared in the preamble; collected here.
3728 needed_counters = {}
3730 i = find_token(document.body, "\\begin_inset CommandInset counter", i)
3733 j = find_end_of_inset(document.body, i)
3735 document.warning("Can't find end of counter inset at line %d!" % i)
# lyxonly insets only affect LyX's own counters, not the output:
# they can simply be dropped.
3738 lyx = get_quoted_value(document.body, "lyxonly", i, j)
3740 # there is nothing we can do to affect the LyX counters
3741 document.body[i : j + 1] = []
3744 cnt = get_quoted_value(document.body, "counter", i, j)
3746 document.warning("No counter given for inset at line %d!" % i)
3750 cmd = get_quoted_value(document.body, "LatexCommand", i, j)
3751 document.warning(cmd)
# Translate each counter command to its LaTeX equivalent.
3754 val = get_quoted_value(document.body, "value", i, j)
3756 document.warning("Can't convert counter inset at line %d!" % i)
3758 ert = put_cmd_in_ert("\\setcounter{%s}{%s}" % (cnt, val))
3759 elif cmd == "addto":
3760 val = get_quoted_value(document.body, "value", i, j)
3762 document.warning("Can't convert counter inset at line %d!" % i)
3764 ert = put_cmd_in_ert("\\addtocounter{%s}{%s}" % (cnt, val))
3765 elif cmd == "reset":
3766 ert = put_cmd_in_ert("\\setcounter{%s}{0}" % (cnt))
# save/restore go through the auxiliary LyXSave<cnt> counter.
3768 needed_counters[cnt] = 1
3769 savecnt = "LyXSave" + cnt
3770 ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (savecnt, cnt))
3771 elif cmd == "restore":
3772 needed_counters[cnt] = 1
3773 savecnt = "LyXSave" + cnt
3774 ert = put_cmd_in_ert("\\setcounter{%s}{\\value{%s}}" % (cnt, savecnt))
# NOTE(review): this message formats the counter name (cnt) into the
# "Unknown counter command" slot — presumably cmd was intended; confirm.
3776 document.warning("Unknown counter command `%s' in inset at line %d!" % (cnt, i))
3779 document.body[i : j + 1] = ert
# Declare the auxiliary save counters in the preamble.
3784 for cnt in needed_counters:
3785 pretext.append("\\newcounter{LyXSave%s}" % (cnt))
3787 add_to_preamble(document, pretext)
3790 def revert_ams_spaces(document):
3791 "Revert InsetSpace medspace and thickspace into their TeX-code counterparts"
# Track whether any such space was found, so amsmath is only required
# when actually needed.
3793 insets = ["\\medspace{}", "\\thickspace{}"]
3794 for inset in insets:
3796 i = find_token(document.body, "\\begin_inset space " + inset, i)
3799 end = find_end_of_inset(document.body, i)
3800 subst = put_cmd_in_ert(inset)
3801 document.body[i : end + 1] = subst
3805 # load amsmath in the preamble if not already loaded
3806 i = find_token(document.header, "\\use_package amsmath 2", 0)
# \thickspace is used as the probe: if it is defined, amsmath (or an
# equivalent) is already loaded.
3808 add_to_preamble(document, ["\\@ifundefined{thickspace}{\\usepackage{amsmath}}{}"])
3812 def convert_parskip(document):
3813 " Move old parskip settings to preamble "
# Only applies to documents using skip-based paragraph separation.
3815 i = find_token(document.header, "\\paragraph_separation skip", 0)
3819 j = find_token(document.header, "\\defskip", 0)
3821 document.warning("Malformed LyX document! Missing \\defskip.")
3824 val = get_value(document.header, "\\defskip", j)
# Named skips map to their *amount macros; the default is medskip.
3826 skipval = "\\medskipamount"
3827 if val == "smallskip" or val == "medskip" or val == "bigskip":
3828 skipval = "\\" + val + "amount"
# Emulate the old behavior via explicit \parskip/\parindent settings,
# then switch the document to indent-based separation.
3832 add_to_preamble(document, ["\\setlength{\\parskip}{" + skipval + "}", "\\setlength{\\parindent}{0pt}"])
3834 document.header[i] = "\\paragraph_separation indent"
3835 document.header[j] = "\\paragraph_indentation default"
3838 def revert_parskip(document):
3839 " Revert new parskip settings to preamble "
3841 i = find_token(document.header, "\\paragraph_separation skip", 0)
3845 j = find_token(document.header, "\\defskip", 0)
3847 document.warning("Malformed LyX document! Missing \\defskip.")
3850 val = get_value(document.header, "\\defskip", j)
# Translate the skip value into a parskip-package option; "halfline"
# is the package default and needs no option.
3853 if val == "smallskip" or val == "medskip" or val == "bigskip":
3854 skipval = "[skip=\\" + val + "amount]"
3855 elif val == "fullline":
3856 skipval = "[skip=\\baselineskip]"
3857 elif val != "halfline":
3858 skipval = "[skip={" + val + "}]"
3860 add_to_preamble(document, ["\\usepackage" + skipval + "{parskip}"])
# Switch back to indent-based separation for LyX 2.3.
3862 document.header[i] = "\\paragraph_separation indent"
3863 document.header[j] = "\\paragraph_indentation default"
3866 def revert_line_vspaces(document):
3867 " Revert fullline and halfline vspaces to TeX "
# Map of new VSpace keywords to the equivalent raw \vspace commands
# (* variants keep the space at page breaks).
3869 "fullline*" : "\\vspace*{\\baselineskip}",
3870 "fullline" : "\\vspace{\\baselineskip}",
3871 "halfline*" : "\\vspace*{0.5\\baselineskip}",
3872 "halfline" : "\\vspace{0.5\\baselineskip}",
3874 for inset in insets.keys():
3876 i = find_token(document.body, "\\begin_inset VSpace " + inset, i)
3879 end = find_end_of_inset(document.body, i)
3880 subst = put_cmd_in_ert(insets[inset])
3881 document.body[i : end + 1] = subst
def convert_libertinus_rm_fonts(document):
    """Handle Libertinus serif fonts definition to LaTeX"""

    # Non-TeX (system) fonts need no conversion.
    if get_bool_value(document.header, "\\use_non_tex_fonts"):
        return
    mapping = createFontMapping(['Libertinus'])
    convert_fonts(document, mapping)
def revert_libertinus_rm_fonts(document):
    """Revert Libertinus serif font definition to LaTeX"""

    if not get_bool_value(document.header, "\\use_non_tex_fonts"):
        # Initialize the package -> [options] map before revert_fonts()
        # fills it; the visible code used it without assignment.
        fontmap = dict()
        fm = createFontMapping(['libertinus'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
3899 def revert_libertinus_sftt_fonts(document):
3900 " Revert Libertinus sans and tt font definitions to LaTeX "
3902 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
# --- sans: emulate via \sfdefault, honoring the OSF flag ---
3904 i = find_token(document.header, "\\font_sans \"LibertinusSans-LF\"", 0)
3906 j = find_token(document.header, "\\font_sans_osf true", 0)
3908 add_to_preamble(document, ["\\renewcommand{\\sfdefault}{LibertinusSans-OsF}"])
3909 document.header[j] = "\\font_sans_osf false"
3911 add_to_preamble(document, ["\\renewcommand{\\sfdefault}{LibertinusSans-LF}"])
3912 document.header[i] = document.header[i].replace("LibertinusSans-LF", "default")
3914 sfval = find_token(document.header, "\\font_sf_scale", 0)
3916 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
3918 sfscale = document.header[sfval].split()
3921 document.header[sfval] = " ".join(sfscale)
3924 sf_scale = float(val)
3926 document.warning("Invalid font_sf_scale value: " + val)
# NOTE(review): sf_scale is a float, so comparing against the string
# "100.0" is always True — the scale command is emitted even for 100%.
# Presumably `!= 100.0` (numeric) was intended; confirm.
3927 if sf_scale != "100.0":
3928 add_to_preamble(document, ["\\renewcommand*{\\LibertinusSans@scale}{" + str(sf_scale / 100.0) + "}"])
# --- typewriter: emulate via \ttdefault ---
3930 i = find_token(document.header, "\\font_typewriter \"LibertinusMono-TLF\"", 0)
3932 add_to_preamble(document, ["\\renewcommand{\\ttdefault}{LibertinusMono-TLF}"])
3933 document.header[i] = document.header[i].replace("LibertinusMono-TLF", "default")
3935 ttval = find_token(document.header, "\\font_tt_scale", 0)
3937 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
3939 ttscale = document.header[ttval].split()
3942 document.header[ttval] = " ".join(ttscale)
3945 tt_scale = float(val)
3947 document.warning("Invalid font_tt_scale value: " + val)
# NOTE(review): same float-vs-string comparison issue as sf_scale above.
3948 if tt_scale != "100.0":
3949 add_to_preamble(document, ["\\renewcommand*{\\LibertinusMono@scale}{" + str(tt_scale / 100.0) + "}"])
def revert_docbook_table_output(document):
    """Remove the \\docbook_table_output buffer parameter."""
    i = find_token(document.header, '\\docbook_table_output')
    # find_token returns -1 when the token is absent; an unguarded
    # `del header[-1]` would silently drop the last header line.
    if i != -1:
        del document.header[i]
3958 def revert_nopagebreak(document):
# Replaces Newpage nopagebreak insets with raw \nopagebreak{} ERT.
3960 i = find_token(document.body, "\\begin_inset Newpage nopagebreak")
3963 end = find_end_of_inset(document.body, i)
3965 document.warning("Malformed LyX document: Could not find end of Newpage inset.")
3967 subst = put_cmd_in_ert("\\nopagebreak{}")
3968 document.body[i : end + 1] = subst
3971 def revert_hrquotes(document):
3972 " Revert Hungarian Quotation marks "
# Header: map the hungarian style to the closest supported one (polish).
3974 i = find_token(document.header, "\\quotes_style hungarian", 0)
3976 document.header[i] = "\\quotes_style polish"
# Body: rewrite each h* quote inset to its fallback variant.
3980 i = find_token(document.body, "\\begin_inset Quotes h")
3983 if document.body[i] == "\\begin_inset Quotes hld":
3984 document.body[i] = "\\begin_inset Quotes pld"
3985 elif document.body[i] == "\\begin_inset Quotes hrd":
3986 document.body[i] = "\\begin_inset Quotes prd"
3987 elif document.body[i] == "\\begin_inset Quotes hls":
3988 document.body[i] = "\\begin_inset Quotes ald"
3989 elif document.body[i] == "\\begin_inset Quotes hrs":
3990 document.body[i] = "\\begin_inset Quotes ard"
3993 def convert_math_refs(document):
# Rename \prettyref to \formatted inside all Formula insets.
3996 i = find_token(document.body, "\\begin_inset Formula", i)
3999 j = find_end_of_inset(document.body, i)
4001 document.warning("Can't find end of inset at line %d of body!" % i)
4005 document.body[i] = document.body[i].replace("\\prettyref", "\\formatted")
4009 def revert_math_refs(document):
# Rename \formatted back to \prettyref and strip \labelonly wrappers
# (keeping only the label argument) inside all Formula insets.
4012 i = find_token(document.body, "\\begin_inset Formula", i)
4015 j = find_end_of_inset(document.body, i)
4017 document.warning("Can't find end of inset at line %d of body!" % i)
4021 document.body[i] = document.body[i].replace("\\formatted", "\\prettyref")
4022 if "\\labelonly" in document.body[i]:
4023 document.body[i] = re.sub("\\\\labelonly{([^}]+?)}", "\\1", document.body[i])
4027 def convert_branch_colors(document):
4028 " Convert branch colors to semantic values "
# Walk each \branch ... \end_branch header section.
4032 i = find_token(document.header, "\\branch", i)
4035 j = find_token(document.header, "\\end_branch", i)
4037 document.warning("Malformed LyX document. Can't find end of branch definition!")
4039 # We only support the standard LyX background for now
# #faf0e6 is LyX's default background color; rewrite it to the
# semantic name "background".
4040 k = find_token(document.header, "\\color #faf0e6", i, j)
4042 document.header[k] = "\\color background"
4046 def revert_branch_colors(document):
4047 " Revert semantic branch colors "
# Walk each \branch ... \end_branch header section.
4051 i = find_token(document.header, "\\branch", i)
4054 j = find_token(document.header, "\\end_branch", i)
4056 document.warning("Malformed LyX document. Can't find end of branch definition!")
4058 k = find_token(document.header, "\\color", i, j)
4060 bcolor = get_value(document.header, "\\color", k)
# NOTE(review): this inspects the *second* character; for a hex value
# like "#faf0e6" the "#" is at index 0 — verify the intended index.
4061 if bcolor[1] != "#":
4062 # this will be read as background by LyX 2.3
4063 document.header[k] = "\\color none"
4067 def revert_darkmode_graphics(document):
4068 " Revert darkModeSensitive InsetGraphics param "
# Drop the (LyX 2.4-only) darkModeSensitive line from every Graphics inset.
4072 i = find_token(document.body, "\\begin_inset Graphics", i)
4075 j = find_end_of_inset(document.body, i)
4077 document.warning("Can't find end of graphics inset at line %d!!" %(i))
4080 k = find_token(document.body, "\tdarkModeSensitive", i, j)
4082 del document.body[k]
4086 def revert_branch_darkcols(document):
4087 " Revert dark branch colors "
# Walk each \branch ... \end_branch header section and reduce a
# two-value "\color <light> <dark>" line to the light color only.
4091 i = find_token(document.header, "\\branch", i)
4094 j = find_token(document.header, "\\end_branch", i)
4096 document.warning("Malformed LyX document. Can't find end of branch definition!")
4098 k = find_token(document.header, "\\color", i, j)
4100 m = re.search('\\\\color (\\S+) (\\S+)', document.header[k])
4102 document.header[k] = "\\color " + m.group(1)
4106 def revert_vcolumns2(document):
4107 """Revert varwidth columns with line breaks etc."""
4109 needvarwidth = False
4111 needcellvarwidth = False
4114 i = find_token(document.body, "\\begin_inset Tabular", i+1)
4117 j = find_end_of_inset(document.body, i)
4119 document.warning("Malformed LyX document: Could not find end of tabular.")
4122 # Collect necessary column information
4124 nrows = int(document.body[i+1].split('"')[3])
4125 ncols = int(document.body[i+1].split('"')[5])
4127 for k in range(ncols):
4128 m = find_token(document.body, "<column", m)
4129 width = get_option_value(document.body[m], 'width')
4130 varwidth = get_option_value(document.body[m], 'varwidth')
4131 alignment = get_option_value(document.body[m], 'alignment')
4132 valignment = get_option_value(document.body[m], 'valignment')
4133 special = get_option_value(document.body[m], 'special')
4134 col_info.append([width, varwidth, alignment, valignment, special, m])
4139 for row in range(nrows):
4140 for col in range(ncols):
4141 m = find_token(document.body, "<cell", m)
4142 multicolumn = get_option_value(document.body[m], 'multicolumn') != ""
4143 multirow = get_option_value(document.body[m], 'multirow') != ""
4144 fixedwidth = get_option_value(document.body[m], 'width') != ""
4145 rotate = get_option_value(document.body[m], 'rotate')
4146 cellalign = get_option_value(document.body[m], 'alignment')
4147 cellvalign = get_option_value(document.body[m], 'valignment')
4148 # Check for: linebreaks, multipars, non-standard environments
4150 endcell = find_token(document.body, "</cell>", begcell)
4152 if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
4153 vcand = not fixedwidth
4154 elif count_pars_in_inset(document.body, begcell + 2) > 1:
4155 vcand = not fixedwidth
4156 elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
4157 vcand = not fixedwidth
4158 colalignment = col_info[col][2]
4159 colvalignment = col_info[col][3]
4161 if rotate == "" and ((colalignment == "left" and colvalignment == "top") or (multicolumn == True and cellalign == "left" and cellvalign == "top")):
4162 if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][4] == "":
4164 col_line = col_info[col][5]
4166 vval = "V{\\linewidth}"
4168 document.body[m] = document.body[m][:-1] + " special=\"" + vval + "\">"
4170 document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
4173 if multicolumn or multirow:
4174 if cellvalign == "middle":
4176 elif cellvalign == "bottom":
4179 if colvalignment == "middle":
4181 elif colvalignment == "bottom":
4183 flt = find_token(document.body, "\\begin_layout", begcell, endcell)
4184 elt = find_token_backwards(document.body, "\\end_layout", endcell)
4185 if flt != -1 and elt != -1:
4187 # we need to reset character layouts if necessary
4188 el = find_token(document.body, '\\emph on', flt, elt)
4190 extralines.append("\\emph default")
4191 el = find_token(document.body, '\\noun on', flt, elt)
4193 extralines.append("\\noun default")
4194 el = find_token(document.body, '\\series', flt, elt)
4196 extralines.append("\\series default")
4197 el = find_token(document.body, '\\family', flt, elt)
4199 extralines.append("\\family default")
4200 el = find_token(document.body, '\\shape', flt, elt)
4202 extralines.append("\\shape default")
4203 el = find_token(document.body, '\\color', flt, elt)
4205 extralines.append("\\color inherit")
4206 el = find_token(document.body, '\\size', flt, elt)
4208 extralines.append("\\size default")
4209 el = find_token(document.body, '\\bar under', flt, elt)
4211 extralines.append("\\bar default")
4212 el = find_token(document.body, '\\uuline on', flt, elt)
4214 extralines.append("\\uuline default")
4215 el = find_token(document.body, '\\uwave on', flt, elt)
4217 extralines.append("\\uwave default")
4218 el = find_token(document.body, '\\strikeout on', flt, elt)
4220 extralines.append("\\strikeout default")
4221 document.body[elt:elt+1] = extralines + put_cmd_in_ert("\\end{cellvarwidth}") + [r"\end_layout"]
4223 for q in range(flt, elt):
4224 if document.body[q] != "" and document.body[q][0] != "\\":
4226 if document.body[q][:5] == "\\lang":
4230 document.body[parlang+1:parlang+1] = put_cmd_in_ert("\\begin{cellvarwidth}" + alarg)
4232 document.body[flt+1:flt+1] = put_cmd_in_ert("\\begin{cellvarwidth}" + alarg)
4233 needcellvarwidth = True
4235 # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
4236 # with newlines, and we do not want that)
4238 endcell = find_token(document.body, "</cell>", begcell)
4240 nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
4242 nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
4246 nle = find_end_of_inset(document.body, nl)
4247 del(document.body[nle:nle+1])
4249 document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
4251 document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
4252 # Replace parbreaks in multirow with \\endgraf
4253 if multirow == True:
4254 flt = find_token(document.body, "\\begin_layout", begcell, endcell)
4257 elt = find_end_of_layout(document.body, flt)
4259 document.warning("Malformed LyX document! Missing layout end.")
4261 endcell = find_token(document.body, "</cell>", begcell)
4262 flt = find_token(document.body, "\\begin_layout", elt, endcell)
4265 document.body[elt : flt + 1] = put_cmd_in_ert("\\endgraf{}")
4271 if needarray == True:
4272 add_to_preamble(document, ["\\usepackage{array}"])
4273 if needcellvarwidth == True:
4274 add_to_preamble(document, ["%% Variable width box for table cells",
4275 "\\newenvironment{cellvarwidth}[1][t]",
4276 " {\\begin{varwidth}[#1]{\\linewidth}}",
4277 " {\\@finalstrut\\@arstrutbox\\end{varwidth}}"])
4278 if needvarwidth == True:
4279 add_to_preamble(document, ["\\usepackage{varwidth}"])
# Convert ERT-based variable-width table cells (cellvarwidth environments,
# ERT newlines/linebreaks, ERT \endgraf in multirows) back to native LyX
# constructs introduced in format 2.4.
# NOTE(review): this listing omits intermediate source lines (loop headers,
# continue/break and else branches fall in the gaps).
4282 def convert_vcolumns2(document):
4283 """Convert varwidth ERT to native"""
# Iterate over all tabular insets in the body.
4287 i = find_token(document.body, "\\begin_inset Tabular", i+1)
4290 j = find_end_of_inset(document.body, i)
4292 document.warning("Malformed LyX document: Could not find end of tabular.")
# Row/column counts are quoted attributes on the <lyxtabular ...> line
# directly after the inset begin.
4296 nrows = int(document.body[i+1].split('"')[3])
4297 ncols = int(document.body[i+1].split('"')[5])
# Walk every cell of the table in row-major order.
4300 for row in range(nrows):
4301 for col in range(ncols):
4302 m = find_token(document.body, "<cell", m)
4303 multirow = get_option_value(document.body[m], 'multirow') != ""
4305 endcell = find_token(document.body, "</cell>", begcell)
# A cell is a conversion candidate if it contains \begin{cellvarwidth}
# written as ERT (i.e. preceded by \backslash inside an ERT inset).
4307 cvw = find_token(document.body, "begin{cellvarwidth}", begcell, endcell)
4309 vcand = document.body[cvw - 1] == "\\backslash" and get_containing_inset(document.body, cvw)[0] == "ERT"
4311 # Remove ERTs with cellvarwidth env
4312 ecvw = find_token(document.body, "end{cellvarwidth}", begcell, endcell)
4314 if document.body[ecvw - 1] == "\\backslash":
4315 eertins = get_containing_inset(document.body, ecvw)
4316 if eertins and eertins[0] == "ERT":
4317 del document.body[eertins[1] : eertins[2] + 1]
# Re-find the opening ERT after the deletion above shifted indices.
4319 cvw = find_token(document.body, "begin{cellvarwidth}", begcell, endcell)
4320 ertins = get_containing_inset(document.body, cvw)
4321 if ertins and ertins[0] == "ERT":
4322 del(document.body[ertins[1] : ertins[2] + 1])
4324 # Convert ERT newlines (as cellvarwidth detection relies on that)
4326 endcell = find_token(document.body, "</cell>", begcell)
# An ERT "\\" shows up as two \backslash lines with one line between.
4327 nl = find_token(document.body, "\\backslash", begcell, endcell)
4328 if nl == -1 or document.body[nl + 2] != "\\backslash":
4330 ertins = get_containing_inset(document.body, nl)
4331 if ertins and ertins[0] == "ERT":
4332 document.body[ertins[1] : ertins[2] + 1] = ["\\begin_inset Newline newline", "", "\\end_inset"]
4334 # Same for linebreaks
4336 endcell = find_token(document.body, "</cell>", begcell)
4337 nl = find_token(document.body, "linebreak", begcell, endcell)
4338 if nl == -1 or document.body[nl - 1] != "\\backslash":
4340 ertins = get_containing_inset(document.body, nl)
4341 if ertins and ertins[0] == "ERT":
4342 document.body[ertins[1] : ertins[2] + 1] = ["\\begin_inset Newline linebreak", "", "\\end_inset"]
# In multirow cells, ERT \endgraf{} markers are turned back into real
# paragraph breaks.
4345 if multirow == True:
4346 endcell = find_token(document.body, "</cell>", begcell)
4347 nl = find_token(document.body, "endgraf{}", begcell, endcell)
4348 if nl == -1 or document.body[nl - 1] != "\\backslash":
4350 ertins = get_containing_inset(document.body, nl)
4351 if ertins and ertins[0] == "ERT":
4352 document.body[ertins[1] : ertins[2] + 1] = ["\\end_layout", "", "\\begin_layout Plain Layout"]
# Drop the preamble code that the old revert routine had injected.
4358 del_complete_lines(document.preamble,
4359 ['% Added by lyx2lyx',
4360 '%% Variable width box for table cells',
4361 r'\newenvironment{cellvarwidth}[1][t]',
4362 r' {\begin{varwidth}[#1]{\linewidth}}',
4363 r' {\@finalstrut\@arstrutbox\end{varwidth}}'])
4364 del_complete_lines(document.preamble,
4365 ['% Added by lyx2lyx',
4366 r'\usepackage{varwidth}'])
# Local-layout definition emulating the KOMA-script Frontispiece style,
# used by the convert/revert pair below. (The closing bracket of this list
# falls in a gap of this listing.)
4369 frontispiece_def = [
4370 r'### Inserted by lyx2lyx (frontispiece layout) ###',
4371 r'Style Frontispiece',
4372 r' CopyStyle Titlehead',
4373 r' LatexName frontispiece',
4378 def convert_koma_frontispiece(document):
4379 """Remove local KOMA frontispiece definition"""
# Only KOMA-script classes (prefix "scr") gained a native Frontispiece
# style, so other classes keep their local layout.
4380 if document.textclass[:3] != "scr":
# del_local_layout returns True when the local copy was found and removed.
# NOTE(review): adding the "ruby" module here looks copied from the ruby
# module converter and unrelated to frontispiece -- verify against upstream.
4383 if document.del_local_layout(frontispiece_def):
4384 document.add_module("ruby")
4387 def revert_koma_frontispiece(document):
4388 """Add local KOMA frontispiece definition"""
4389 if document.textclass[:3] != "scr":
# Re-inject the local layout only if the style is actually used in the body.
4392 if find_token(document.body, "\\begin_layout Frontispiece", 0) != -1:
4393 document.append_local_layout(frontispiece_def)
4396 def revert_spellchecker_ignore(document):
4397 """Revert document spellchecker dictionary"""
# Remove every \spellchecker_ignore line from the header; older formats
# do not know this setting. (The loop construct falls in a listing gap.)
4399 i = find_token(document.header, "\\spellchecker_ignore")
4402 del document.header[i]
4405 def revert_docbook_mathml_prefix(document):
4406 """Revert the DocBook parameter to choose the prefix for the MathML name space"""
# Simply drop the header line; older formats have no such parameter.
4408 i = find_token(document.header, "\\docbook_mathml_prefix")
4411 del document.header[i]
4414 def revert_document_metadata(document):
4415 """Revert document metadata"""
# Delete each \begin_metadata ... \end_metadata section from the header.
4418 i = find_token(document.header, "\\begin_metadata", i)
4421 j = find_end_of(document.header, i, "\\begin_metadata", "\\end_metadata")
4423 # this should not happen
4425 document.header[i : j + 1] = []
# Revert the 2.4 IndexMacro subinsets (see, seealso, subentry, sortkey)
# inside Index insets back to the classic makeindex ERT syntax
# (sortkey@term, term!subentry, term|see{...} etc.).
4428 def revert_index_macros(document):
4429 " Revert inset index macros "
4433 # trailing blank needed here to exclude IndexMacro insets
4434 i = find_token(document.body, '\\begin_inset Index ', i+1)
4437 j = find_end_of_inset(document.body, i)
4439 document.warning("Malformed LyX document: Can't find end of index inset at line %d" % i)
4441 pl = find_token(document.body, '\\begin_layout Plain Layout', i, j)
4443 document.warning("Malformed LyX document: Can't find plain layout in index inset at line %d" % i)
4445 # find, store and remove inset params
4446 pr = find_token(document.body, 'range', i, pl)
4447 prval = get_quoted_value(document.body, "range", pr)
# "start"/"end" page ranges map to the makeindex "|(" / "|)" markers.
4449 if prval == "start":
4451 elif prval == "end":
4453 pf = find_token(document.body, 'pageformat', i, pl)
4454 pageformat = get_quoted_value(document.body, "pageformat", pf)
4455 del document.body[pr:pf+1]
4456 # Now re-find (potentially moved) inset end again, and search for subinsets
4457 j = find_end_of_inset(document.body, i)
4459 document.warning("Malformed LyX document: Can't find end of index inset at line %d" % i)
4461 # We search for all possible subentries in turn, store their
4462 # content and delete them
4468 # Two subentries are allowed, thus the duplication
4469 imacros = ["seealso", "see", "subentry", "subentry", "sortkey"]
4470 for imacro in imacros:
4471 iim = find_token(document.body, "\\begin_inset IndexMacro %s" % imacro, i, j)
4474 iime = find_end_of_inset(document.body, iim)
4476 document.warning("Malformed LyX document: Can't find end of index macro inset at line %d" % i)
4478 iimpl = find_token(document.body, '\\begin_layout Plain Layout', iim, iime)
4480 document.warning("Malformed LyX document: Can't find plain layout in index macro inset at line %d" % i)
4482 iimple = find_end_of_layout(document.body, iimpl)
4484 document.warning("Malformed LyX document: Can't find end of index macro inset plain layout at line %d" % i)
# icont holds the raw body lines of the current subinset.
4486 icont = document.body[iimpl:iimple]
4487 if imacro == "seealso":
4489 elif imacro == "see":
4491 elif imacro == "subentry":
4492 # subentries might have their own sortkey!
4493 xiim = find_token(document.body, "\\begin_inset IndexMacro sortkey", iimpl, iimple)
4495 xiime = find_end_of_inset(document.body, xiim)
4497 document.warning("Malformed LyX document: Can't find end of index macro inset at line %d" % i)
4499 xiimpl = find_token(document.body, '\\begin_layout Plain Layout', xiim, xiime)
4501 document.warning("Malformed LyX document: Can't find plain layout in index macro inset at line %d" % i)
4503 xiimple = find_end_of_layout(document.body, xiimpl)
4505 document.warning("Malformed LyX document: Can't find end of index macro inset plain layout at line %d" % i)
4508 xicont = document.body[xiimpl+1:xiimple]
4509 # everything before ................... or after
4510 xxicont = document.body[iimpl+1:xiim] + document.body[xiime+1:iimple]
4511 # construct the latex sequence
# sortkey@term: sortkey first, then "@", then the subentry content.
4512 icont = xicont + put_cmd_in_ert("@") + xxicont[1:]
# First subentry goes to `subentry`, a second one to `subentry2`.
4513 if len(subentry) > 0:
4514 if (icont[0] == "\\begin_layout Plain Layout"):
4515 subentry2 = icont[1:]
4519 if (icont[0] == "\\begin_layout Plain Layout"):
4520 subentry = icont[1:]
4523 elif imacro == "sortkey":
4525 # Everything stored. Delete subinset.
4526 del document.body[iim:iime+1]
4527 # Again re-find (potentially moved) index inset end
4528 j = find_end_of_inset(document.body, i)
4530 document.warning("Malformed LyX document: Can't find end of index inset at line %d" % i)
4532 # Now insert all stuff, starting from the inset end
4533 pl = find_token(document.body, '\\begin_layout Plain Layout', i, j)
4535 document.warning("Malformed LyX document: Can't find plain layout in index inset at line %d" % i)
4537 ple = find_end_of_layout(document.body, pl)
4539 document.warning("Malformed LyX document: Can't find end of index macro inset plain layout at line %d" % i)
# Insert in reverse output order at the layout end, so the final body
# reads: term!sub!sub2|see{...} (or |seealso / |pageformat).
4542 document.body[ple:ple] = put_cmd_in_ert("|" + pagerange + "see{") + see + put_cmd_in_ert("}")
4543 elif len(seealso) > 0:
4544 document.body[ple:ple] = put_cmd_in_ert("|" + pagerange + "seealso{") + seealso + put_cmd_in_ert("}")
4545 elif pageformat != "default":
4546 document.body[ple:ple] = put_cmd_in_ert("|" + pagerange + pageformat)
4547 if len(subentry2) > 0:
4548 document.body[ple:ple] = put_cmd_in_ert("!") + subentry2
4549 if len(subentry) > 0:
4550 document.body[ple:ple] = put_cmd_in_ert("!") + subentry
# The sortkey goes to the very front, followed by "@".
4551 if len(sortkey) > 0:
4552 document.body[pl:pl+1] = document.body[pl:pl] + sortkey + put_cmd_in_ert("@")
# Revert starred (nolink) cross-references: without hyperref the "nolink"
# parameter is simply dropped; with hyperref, refs that suppress the link
# are rewritten as ERT \cmd*{label}.
4555 def revert_starred_refs(document):
4556 " Revert starred refs "
4557 i = find_token(document.header, "\\use_hyperref true", 0)
4558 use_hyperref = (i != -1)
4566 i = find_token(document.body, "\\begin_inset CommandInset ref", i)
4570 end = find_end_of_inset(document.body, i)
4572 document.warning("Malformed LyX document: Can't find end of inset at line %d" % i)
4575 # If we are not using hyperref, then we just need to delete the line
4576 if not use_hyperref:
4577 k = find_token(document.body, "nolink", i, end)
4581 del document.body[k]
4584 # If we are using hyperref, then we'll need to do more.
4588 # so we are in an InsetRef
4591 # If nolink is False, just remove that line
# formatted/labelonly refs never produce a \cmd* form, so they are
# treated like nolink == False.
4592 if nolink == False or cmd == "formatted" or cmd == "labelonly":
4593 # document.warning("Skipping " + cmd + " " + ref)
4594 if nolinkline != -1:
4595 del document.body[nolinkline]
4598 # We need to construct a new command and put it in ERT
4599 newcmd = "\\" + cmd + "*{" + ref + "}"
4600 # document.warning(newcmd)
4601 newlines = put_cmd_in_ert(newcmd)
# Replace the whole inset and account for the length difference.
4602 document.body[start:end+1] = newlines
4603 i += len(newlines) - (end - start) + 1
# Parse the inset parameter lines (LatexCommand, reference, nolink).
4609 l = document.body[i]
4610 if l.startswith("LatexCommand"):
4612 elif l.startswith("reference"):
4614 elif l.startswith("nolink"):
4616 nolink = (tmp == "true")
4621 def convert_starred_refs(document):
4622 " Convert starred refs "
# Add the new 'nolink "false"' parameter to every ref inset; pre-2.4
# files cannot contain it, so "false" is the correct default everywhere.
4625 i = find_token(document.body, "\\begin_inset CommandInset ref", i)
4628 end = find_end_of_inset(document.body, i)
4630 document.warning("Malformed LyX document: Can't find end of inset at line %d" % i)
4634 document.body.insert(newlineat, "nolink \"false\"")
4638 def revert_familydefault(document):
4639 " Revert \\font_default_family for non-TeX fonts "
# Only relevant with non-TeX fonts; with TeX fonts the header setting
# already worked in older formats.
4641 if find_token(document.header, "\\use_non_tex_fonts true", 0) == -1:
4644 i = find_token(document.header, "\\font_default_family", 0)
4646 document.warning("Malformed LyX document: Can't find \\font_default_family header")
4649 dfamily = get_value(document.header, "\\font_default_family", i)
4650 if dfamily == "default":
# Reset the header to "default" and emulate the family via the preamble.
4653 document.header[i] = "\\font_default_family default"
4654 add_to_preamble(document, ["\\renewcommand{\\familydefault}{\\" + dfamily + "}"])
4657 def convert_hyper_other(document):
4658 " Classify \"run:\" links as other "
4662 i = find_token(document.body, "\\begin_inset CommandInset href", i)
4665 j = find_end_of_inset(document.body, i)
4667 document.warning("Cannot find end of inset at line " << str(i))
4670 k = find_token(document.body, "type \"", i, j)
4672 # not a "Web" type. Continue.
4675 t = find_token(document.body, "target", i, j)
4677 document.warning("Malformed hyperlink inset at line " + str(i))
4680 if document.body[t][8:12] == "run:":
4681 document.body.insert(t, "type \"other\"")
4685 def revert_hyper_other(document):
4686 " Revert other link type to ERT and \"run:\" to Web "
4690 i = find_token(document.body, "\\begin_inset CommandInset href", i)
4693 j = find_end_of_inset(document.body, i)
4695 document.warning("Cannot find end of inset at line " << str(i))
4698 k = find_token(document.body, "type \"other\"", i, j)
4703 n = find_token(document.body, "name", i, j)
4704 t = find_token(document.body, "target", i, j)
4705 if n == -1 or t == -1:
4706 document.warning("Malformed hyperlink inset at line " + str(i))
4709 name = document.body[n][6:-1]
4710 target = document.body[t][8:-1]
4711 if target[:4] == "run:":
4712 del document.body[k]
4714 cmd = "\href{" + target + "}{" + name + "}"
4715 ecmd = put_cmd_in_ert(cmd)
4716 document.body[i:j+1] = ecmd
4721 "aa" : "Acknowledgments",
4722 "aapaper" : "Acknowledgments",
4723 "aastex" : "Acknowledgments",
4724 "aastex62" : "Acknowledgments",
4725 "achemso" : "Acknowledgments",
4726 "acmart" : "Acknowledgments",
4727 "AEA" : "Acknowledgments",
4728 "apa" : "Acknowledgments",
4729 "copernicus" : "Acknowledgments",
4730 "egs" : "Acknowledgments",# + Acknowledgment
4731 "elsart" : "Acknowledgment",
4732 "isprs" : "Acknowledgments",
4733 "iucr" : "Acknowledgments",
4734 "kluwer" : "Acknowledgments",
4735 "svglobal3" : "Acknowledgments",
4736 "svglobal" : "Acknowledgment",
4737 "svjog" : "Acknowledgment",
4738 "svmono" : "Acknowledgment",
4739 "svmult" : "Acknowledgment",
4740 "svprobth" : "Acknowledgment",
4744 "aa" : "Acknowledgement",
4745 "aapaper" : "Acknowledgement",
4746 "aastex" : "Acknowledgement",
4747 "aastex62" : "Acknowledgement",
4748 "achemso" : "Acknowledgement",
4749 "acmart" : "Acknowledgements",
4750 "AEA" : "Acknowledgement",
4751 "apa" : "Acknowledgements",
4752 "copernicus" : "Acknowledgements",
4753 "egs" : "Acknowledgements",# + Acknowledgement
4754 "elsart" : "Acknowledegment",
4755 "isprs" : "Acknowledgements",
4756 "iucr" : "Acknowledgements",
4757 "kluwer" : "Acknowledgements",
4758 "svglobal3" : "Acknowledgements",
4759 "svglobal" : "Acknowledgement",
4760 "svjog" : "Acknowledgement",
4761 "svmono" : "Acknowledgement",
4762 "svmult" : "Acknowledgement",
4763 "svprobth" : "Acknowledgement",
4767 def convert_acknowledgment(document):
4768 " Fix spelling of acknowledgment styles "
4770 if document.textclass not in list(ack_layouts_old.keys()):
# Rename every layout using the old per-class spelling to the new one.
4775 i = find_token(document.body, '\\begin_layout ' + ack_layouts_old[document.textclass], i)
4778 document.body[i] = "\\begin_layout " + ack_layouts_new[document.textclass]
4779 if document.textclass != "egs":
4781 # egs has two styles
# Second pass for the additional egs style: "...ement" -> "...ment".
4784 i = find_token(document.body, '\\begin_layout Acknowledgement', i)
4787 document.body[i] = "\\begin_layout Acknowledgment"
4790 def revert_acknowledgment(document):
4791 " Restore old spelling of acknowledgment styles "
4793 if document.textclass not in list(ack_layouts_new.keys()):
# Mirror of convert_acknowledgment: rename new spelling back to old.
4797 i = find_token(document.body, '\\begin_layout ' + ack_layouts_new[document.textclass], i)
4800 document.body[i] = "\\begin_layout " + ack_layouts_old[document.textclass]
4801 if document.textclass != "egs":
4803 # egs has two styles
4806 i = find_token(document.body, '\\begin_layout Acknowledgment', i)
4809 document.body[i] = "\\begin_layout Acknowledgement"
# Local-layout copy of the Acknowledgement theorem style that was removed
# from the theorems-ams-extended module. (The list's opening assignment
# line falls in a gap of this listing.)
4813 r'### Inserted by lyx2lyx (ams extended theorems) ###',
4814 r'### This requires theorems-ams-extended module to be loaded',
4815 r'Style Acknowledgement',
4816 r' CopyStyle Remark',
4817 r' LatexName acknowledgement',
4818 r' LabelString "Acknowledgement \thetheorem."',
4820 r' \theoremstyle{remark}',
4821 r' \newtheorem{acknowledgement}[thm]{\protect\acknowledgementname}',
4824 r' \providecommand{\acknowledgementname}{_(Acknowledgement)}',
4825 r' EndLangPreamble',
4827 r' \addto\captions$$lang{\renewcommand{\acknowledgementname}{_(Acknowledgement)}}',
4828 r' EndBabelPreamble',
4829 r' DocBookTag para',
4830 r' DocBookAttr role="acknowledgement"',
4831 r' DocBookItemTag ""',
# Local-layout copy of the unnumbered Acknowledgement* theorem style
# removed from the theorems-ams-extended-* modules.
4835 ackStar_theorem_def = [
4836 r'### Inserted by lyx2lyx (ams extended theorems) ###',
4837 r'### This requires a theorems-ams-extended-* module to be loaded',
4838 r'Style Acknowledgement*',
4839 r' CopyStyle Remark*',
4840 r' LatexName acknowledgement*',
4841 r' LabelString "Acknowledgement."',
4843 r' \theoremstyle{remark}',
4844 r' \newtheorem*{acknowledgement*}{\protect\acknowledgementname}',
4847 r' \providecommand{\acknowledgementname}{_(Acknowledgement)}',
4848 r' EndLangPreamble',
4850 r' \addto\captions$$lang{\renewcommand{\acknowledgementname}{_(Acknowledgement)}}',
4851 r' EndBabelPreamble',
4852 r' DocBookTag para',
4853 r' DocBookAttr role="acknowledgement"',
4854 r' DocBookItemTag ""',
# Local-layout copy of the by-type Acknowledgement theorem style (own
# counter) removed from theorems-ams-extended-bytype.
4858 ack_bytype_theorem_def = [
4859 r'### Inserted by lyx2lyx (ams extended theorems) ###',
4860 r'### This requires theorems-ams-extended-bytype module to be loaded',
4861 r'Counter acknowledgement',
4862 r' GuiName Acknowledgment',
4864 r'Style Acknowledgement',
4865 r' CopyStyle Remark',
4866 r' LatexName acknowledgement',
4867 r' LabelString "Acknowledgement \theacknowledgement."',
4869 r' \theoremstyle{remark}',
4870 r' \newtheorem{acknowledgement}{\protect\acknowledgementname}',
4873 r' \providecommand{\acknowledgementname}{_(Acknowledgement)}',
4874 r' EndLangPreamble',
4876 r' \addto\captions$$lang{\renewcommand{\acknowledgementname}{_(Acknowledgement)}}',
4877 r' EndBabelPreamble',
4878 r' DocBookTag para',
4879 r' DocBookAttr role="acknowledgement"',
4880 r' DocBookItemTag ""',
# Local-layout copy of the chapter-wise by-type Acknowledgement theorem
# style removed from theorems-ams-extended-chap-bytype; the counter is
# reset per chapter when \thechapter exists.
4884 ack_chap_bytype_theorem_def = [
4885 r'### Inserted by lyx2lyx (ams extended theorems) ###',
4886 r'### This requires theorems-ams-extended-chap-bytype module to be loaded',
4887 r'Counter acknowledgement',
4888 r' GuiName Acknowledgment',
4891 r'Style Acknowledgement',
4892 r' CopyStyle Remark',
4893 r' LatexName acknowledgement',
4894 r' LabelString "Acknowledgement \theacknowledgement."',
4896 r' \theoremstyle{remark}',
4897 r' \ifx\thechapter\undefined',
4898 r' \newtheorem{acknowledgement}{\protect\acknowledgementname}',
4900 r' \newtheorem{acknowledgement}{\protect\acknowledgementname}[chapter]',
4904 r' \providecommand{\acknowledgementname}{_(Acknowledgement)}',
4905 r' EndLangPreamble',
4907 r' \addto\captions$$lang{\renewcommand{\acknowledgementname}{_(Acknowledgement)}}',
4908 r' EndBabelPreamble',
4909 r' DocBookTag para',
4910 r' DocBookAttr role="acknowledgement"',
4911 r' DocBookItemTag ""',
4915 def convert_ack_theorems(document):
4916 """Put removed acknowledgement theorems to local layout"""
# For each loaded theorems-ams-extended variant, scan the body for
# Acknowledgement / Acknowledgement* layouts and append the matching
# local-layout definition once per style found. (Loop constructs and the
# haveAck/haveStarAck updates fall in gaps of this listing.)
4920 if "theorems-ams-extended-bytype" in document.get_module_list():
4923 if haveAck and haveStarAck:
4925 i = find_token(document.body, '\\begin_layout Acknowledgement', i)
4928 if document.body[i] == "\\begin_layout Acknowledgement*" and not haveStarAck:
4929 document.append_local_layout(ackStar_theorem_def)
4932 document.append_local_layout(ack_bytype_theorem_def)
4935 elif "theorems-ams-extended-chap-bytype" in document.get_module_list():
4938 if haveAck and haveStarAck:
4940 i = find_token(document.body, '\\begin_layout Acknowledgement', i)
4943 if document.body[i] == "\\begin_layout Acknowledgement*" and not haveStarAck:
4944 document.append_local_layout(ackStar_theorem_def)
4947 document.append_local_layout(ack_chap_bytype_theorem_def)
4950 elif "theorems-ams-extended" in document.get_module_list():
4953 if haveAck and haveStarAck:
4955 i = find_token(document.body, '\\begin_layout Acknowledgement', i)
4958 if document.body[i] == "\\begin_layout Acknowledgement*" and not haveStarAck:
4959 document.append_local_layout(ackStar_theorem_def)
4962 document.append_local_layout(ack_theorem_def)
4967 def revert_ack_theorems(document):
4968 """Remove acknowledgement theorems from local layout"""
# Inverse of convert_ack_theorems: delete whichever local-layout
# definitions the module variant would have inserted. del_local_layout
# is a no-op if the layout is not present.
4969 if "theorems-ams-extended-bytype" in document.get_module_list():
4970 document.del_local_layout(ackStar_theorem_def)
4971 document.del_local_layout(ack_bytype_theorem_def)
4972 elif "theorems-ams-extended-chap-bytype" in document.get_module_list():
4973 document.del_local_layout(ackStar_theorem_def)
4974 document.del_local_layout(ack_chap_bytype_theorem_def)
4975 elif "theorems-ams-extended" in document.get_module_list():
4976 document.del_local_layout(ackStar_theorem_def)
4977 document.del_local_layout(ack_theorem_def)
4979 def revert_empty_macro(document):
4980 '''Remove macros with empty LaTeX part'''
4983 i = find_token(document.body, '\\begin_inset FormulaMacro', i)
# The macro definition line follows the inset begin; it ends in "}{}"
# or "]{}" when the LaTeX part is empty.
4986 cmd = document.body[i+1]
4987 if cmd[-3:] != "}{}" and cmd[-3:] != "]{}":
# Delete the whole inset: older formats cannot represent empty macros.
4990 j = find_end_of_inset(document.body, i)
4991 document.body[i:j+1] = []
4994 def convert_empty_macro(document):
4995 '''In the unlikely event someone defined a macro with empty LaTeX, add {}'''
4998 i = find_token(document.body, '\\begin_inset FormulaMacro', i)
5001 cmd = document.body[i+1]
5002 if cmd[-3:] != "}{}" and cmd[-3:] != "]{}":
# Replace the trailing empty "{}" with "{\{\}}" so the LaTeX part is
# non-empty in the new format.
5005 newstr = cmd[:-2] + "{\\{\\}}"
5006 document.body[i+1] = newstr
5010 def convert_cov_options(document):
5011 """Update examples item argument structure"""
# Only documents using the linguistics module are affected.
5013 if "linguistics" not in document.get_module_list():
# Renumber item arguments in the example layouts: item:1 -> item:2,
# making room for a new first argument in the updated module.
5016 layouts = ["Numbered Examples (consecutive)", "Subexample"]
5018 for layout in layouts:
5021 i = find_token(document.body, "\\begin_layout %s" % layout, i)
5024 j = find_end_of_layout(document.body, i)
5026 document.warning("Malformed LyX document: Can't find end of example layout at line %d" % i)
5029 k = find_token(document.body, '\\begin_inset Argument item:1', i, j)
5031 document.body[k] = '\\begin_inset Argument item:2'
5033 # Shift gloss arguments
# Two-line glosses: post:n -> post:2n (highest first to avoid clashes).
5036 i = find_token(document.body, "\\begin_inset Flex Interlinear Gloss (2 Lines)", i)
5039 j = find_end_of_inset(document.body, i)
5041 document.warning("Malformed LyX document: Can't find end of gloss inset at line %d" % i)
5044 k = find_token(document.body, '\\begin_inset Argument post:2', i, j)
5046 document.body[k] = '\\begin_inset Argument post:4'
5047 k = find_token(document.body, '\\begin_inset Argument post:1', i, j)
5049 document.body[k] = '\\begin_inset Argument post:2'
# Three-line glosses: same renumbering with three arguments.
5054 i = find_token(document.body, "\\begin_inset Flex Interlinear Gloss (3 Lines)", i)
5057 j = find_end_of_inset(document.body, i)
5059 document.warning("Malformed LyX document: Can't find end of gloss inset at line %d" % i)
5062 k = find_token(document.body, '\\begin_inset Argument post:3', i, j)
5064 document.body[k] = '\\begin_inset Argument post:6'
5065 k = find_token(document.body, '\\begin_inset Argument post:2', i, j)
5067 document.body[k] = '\\begin_inset Argument post:4'
5068 k = find_token(document.body, '\\begin_inset Argument post:1', i, j)
5070 document.body[k] = '\\begin_inset Argument post:2'
# Revert Interlinear Gloss insets that use the new (post-2.4) argument
# layout to plain covington ERT (\digloss / \trigloss with optional and
# mandatory braces). Only insets that actually carry the new odd-numbered
# post arguments are touched.
5074 def revert_linggloss2(document):
5075 " Revert gloss with new args to ERT "
5077 if not "linguistics" in document.get_module_list():
5081 glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
5082 for glosse in glosses:
5085 i = find_token(document.body, glosse, i+1)
5088 j = find_end_of_inset(document.body, i)
5090 document.warning("Malformed LyX document: Can't find end of Gloss inset")
5093 # Check if we have new options
5094 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
5096 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
5098 arg = find_token(document.body, "\\begin_inset Argument post:5", i, j)
# Extract the optional argument (Argument 1), then each post argument
# in turn; the same extract-and-delete pattern repeats for each.
5103 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
5104 endarg = find_end_of_inset(document.body, arg)
5107 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
5108 if argbeginPlain == -1:
5109 document.warning("Malformed LyX document: Can't find optarg plain Layout")
5111 argendPlain = find_end_of_inset(document.body, argbeginPlain)
5112 optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
5114 # remove Arg insets and paragraph, if it only contains this inset
5115 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
5116 del document.body[arg - 1 : endarg + 4]
5118 del document.body[arg : endarg + 1]
5120 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
5121 endarg = find_end_of_inset(document.body, arg)
5124 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
5125 if argbeginPlain == -1:
5126 document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
5128 argendPlain = find_end_of_inset(document.body, argbeginPlain)
5129 marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
5131 # remove Arg insets and paragraph, if it only contains this inset
5132 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
5133 del document.body[arg - 1 : endarg + 4]
5135 del document.body[arg : endarg + 1]
5137 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
5138 endarg = find_end_of_inset(document.body, arg)
5141 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
5142 if argbeginPlain == -1:
5143 document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
5145 argendPlain = find_end_of_inset(document.body, argbeginPlain)
5146 marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
5148 # remove Arg insets and paragraph, if it only contains this inset
5149 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
5150 del document.body[arg - 1 : endarg + 4]
5152 del document.body[arg : endarg + 1]
5154 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
5155 endarg = find_end_of_inset(document.body, arg)
5158 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
5159 if argbeginPlain == -1:
5160 document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
5162 argendPlain = find_end_of_inset(document.body, argbeginPlain)
5163 marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
5165 # remove Arg insets and paragraph, if it only contains this inset
5166 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
5167 del document.body[arg - 1 : endarg + 4]
5169 del document.body[arg : endarg + 1]
5171 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
5172 endarg = find_end_of_inset(document.body, arg)
5175 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
5176 if argbeginPlain == -1:
5177 document.warning("Malformed LyX document: Can't find arg 4 plain Layout")
5179 argendPlain = find_end_of_inset(document.body, argbeginPlain)
5180 marg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
5182 # remove Arg insets and paragraph, if it only contains this inset
5183 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
5184 del document.body[arg - 1 : endarg + 4]
5186 del document.body[arg : endarg + 1]
5188 arg = find_token(document.body, "\\begin_inset Argument post:5", i, j)
5189 endarg = find_end_of_inset(document.body, arg)
5192 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
5193 if argbeginPlain == -1:
5194 document.warning("Malformed LyX document: Can't find arg 5 plain Layout")
5196 argendPlain = find_end_of_inset(document.body, argbeginPlain)
5197 marg5content = document.body[argbeginPlain + 1 : argendPlain - 2]
5199 # remove Arg insets and paragraph, if it only contains this inset
5200 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
5201 del document.body[arg - 1 : endarg + 4]
5203 del document.body[arg : endarg + 1]
5205 arg = find_token(document.body, "\\begin_inset Argument post:6", i, j)
5206 endarg = find_end_of_inset(document.body, arg)
5209 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
5210 if argbeginPlain == -1:
5211 document.warning("Malformed LyX document: Can't find arg 6 plain Layout")
5213 argendPlain = find_end_of_inset(document.body, argbeginPlain)
5214 marg6content = document.body[argbeginPlain + 1 : argendPlain - 2]
5216 # remove Arg insets and paragraph, if it only contains this inset
5217 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
5218 del document.body[arg - 1 : endarg + 4]
5220 del document.body[arg : endarg + 1]
# Three-line glosses use \trigloss, two-line ones the other command
# (assignment falls in a listing gap).
5223 if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
# Rebuild the inset as ERT: \cmd[opt]{...}[m1]{m2}([m3]{m4}([m5]{m6})).
5226 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
5227 endInset = find_end_of_inset(document.body, i)
5228 endPlain = find_end_of_layout(document.body, beginPlain)
5229 precontent = put_cmd_in_ert(cmd)
5230 if len(optargcontent) > 0:
5231 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
5232 precontent += put_cmd_in_ert("{")
5234 postcontent = put_cmd_in_ert("}")
5235 if len(marg1content) > 0:
5236 postcontent += put_cmd_in_ert("[") + marg1content + put_cmd_in_ert("]")
5237 postcontent += put_cmd_in_ert("{") + marg2content + put_cmd_in_ert("}")
5238 if len(marg3content) > 0:
5239 postcontent += put_cmd_in_ert("[") + marg3content + put_cmd_in_ert("]")
5240 postcontent += put_cmd_in_ert("{") + marg4content + put_cmd_in_ert("}")
5241 if cmd == "\\trigloss":
5242 if len(marg5content) > 0:
5243 postcontent += put_cmd_in_ert("[") + marg5content + put_cmd_in_ert("]")
5244 postcontent += put_cmd_in_ert("{") + marg6content + put_cmd_in_ert("}")
5246 document.body[endPlain:endInset + 1] = postcontent
5247 document.body[beginPlain + 1:beginPlain] = precontent
5248 del document.body[i : beginPlain + 1]
# The ERT commands need the covington package via local layout.
5250 document.append_local_layout("Requires covington")
# NOTE(review): this listing is line-sampled -- the embedded original line
# numbers (5255, 5256, 5258, ...) show gaps, so several statements (e.g. the
# bodies of some `if` tests, the `continue`/`break` branches, and loop
# initializers) are missing from this view.  Comments below state only what
# the visible lines establish; anything else is hedged.
#
# Purpose: revert covington linguistic example paragraphs that use the
# new-style LyX Argument insets ("Argument 1", "Argument item:1/2") back to
# raw LaTeX in ERT, emitting \begin{examples}/\begin{subexamples} ...
# \item<...>[...] ... \end{...} for older file formats.
5255 def revert_exarg2(document):
5256 " Revert linguistic examples with new arguments to ERT "
# Only documents loading the "linguistics" module carry these layouts.
5258 if not "linguistics" in document.get_module_list():
5263 layouts = ["Numbered Example", "Subexample"]
# Scan the body for each example layout in turn; `i` is the layout start,
# `j` its end (a -1 from find_end_of_layout is reported as malformed).
5265 for layout in layouts:
5268 i = find_token(document.body, "\\begin_layout %s" % layout, i+1)
5271 j = find_end_of_layout(document.body, i)
5273 document.warning("Malformed LyX document: Can't find end of example layout")
# Classify the exact layout flavour; `layouttype` is the canonical
# \begin_layout string used below to recognise consecutive paragraphs
# of the same kind (presumably selected by missing if/elif lines).
5275 consecex = document.body[i] == "\\begin_layout Numbered Examples (consecutive)"
5276 subexpl = document.body[i] == "\\begin_layout Subexample"
5277 singleex = document.body[i] == "\\begin_layout Numbered Examples (multiline)"
5278 layouttype = "\\begin_layout Numbered Examples (multiline)"
5280 layouttype = "\\begin_layout Numbered Examples (consecutive)"
5282 layouttype = "\\begin_layout Subexample"
# Walk forward over consecutive layouts of the same type, extending the
# region [i, l] that will become one LaTeX environment.
5288 m = find_end_of_layout(document.body, k)
5289 # check for consecutive layouts
5290 k = find_token(document.body, "\\begin_layout", m)
5291 if k == -1 or document.body[k] != layouttype:
5293 l = find_end_of_layout(document.body, k)
5295 document.warning("Malformed LyX document: Can't find end of example layout")
# "Argument 1" is the environment's optional argument; make sure the
# inset found actually belongs to this layout, not a nested one.
5298 arg = find_token(document.body, "\\begin_inset Argument 1", i, l)
5299 if arg != -1 and layouttype != "\\begin_layout " + get_containing_layout(document.body, arg)[0]:
5300 # this is not our argument!
5302 if subexpl or arg == -1:
5303 iarg = find_token(document.body, "\\begin_inset Argument item:1", i, l)
# Extract the optional-argument content from its Plain Layout and
# convert it to LaTeX source.
5308 endarg = find_end_of_inset(document.body, arg)
5310 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
5311 if argbeginPlain == -1:
5312 document.warning("Malformed LyX document: Can't find optarg plain Layout")
5314 argendPlain = find_end_of_inset(document.body, argbeginPlain)
5315 optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
5316 # This is a verbatim argument
# Strip the textbackslash{} escapes lyx2latex produced, since the
# argument is passed through verbatim.
5317 optargcontent = re.sub(r'textbackslash{}', r'', optargcontent)
# "Argument item:1" maps to \item<...>, "item:2" to \item[...].
5320 iarg = find_token(document.body, "\\begin_inset Argument item:1", i, j)
5322 endiarg = find_end_of_inset(document.body, iarg)
5324 iargbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", iarg, endiarg)
5325 if iargbeginPlain == -1:
5326 document.warning("Malformed LyX document: Can't find optarg plain Layout")
5328 iargendPlain = find_end_of_inset(document.body, iargbeginPlain)
5329 itemarg = "<" + lyx2latex(document, document.body[iargbeginPlain : iargendPlain]) + ">"
5331 iarg2 = find_token(document.body, "\\begin_inset Argument item:2", i, j)
5333 endiarg2 = find_end_of_inset(document.body, iarg2)
5335 iarg2beginPlain = find_token(document.body, "\\begin_layout Plain Layout", iarg2, endiarg2)
5336 if iarg2beginPlain == -1:
5337 document.warning("Malformed LyX document: Can't find optarg plain Layout")
5339 iarg2endPlain = find_end_of_inset(document.body, iarg2beginPlain)
5340 itemarg += "[" + lyx2latex(document, document.body[iarg2beginPlain : iarg2endPlain]) + "]"
5345 # remove Arg insets and paragraph, if it only contains this inset
# The `endarg + 3` test recognises an enclosing Plain Layout that holds
# nothing but the inset, so the whole paragraph can be dropped.
5347 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
5348 del document.body[arg - 1 : endarg + 4]
5350 del document.body[arg : endarg + 1]
# Indices shifted by the deletions above, so the item arguments must be
# re-found before being removed the same way.
5352 iarg = find_token(document.body, "\\begin_inset Argument item:1", i, j)
5354 document.warning("Unable to re-find item:1 Argument")
5356 endiarg = find_end_of_inset(document.body, iarg)
5357 if document.body[iarg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, iarg - 1) == endiarg + 3:
5358 del document.body[iarg - 1 : endiarg + 4]
5360 del document.body[iarg : endiarg + 1]
5362 iarg2 = find_token(document.body, "\\begin_inset Argument item:2", i, j)
5364 document.warning("Unable to re-find item:2 Argument")
5366 endiarg2 = find_end_of_inset(document.body, iarg2)
5367 if document.body[iarg2 - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, iarg2 - 1) == endiarg2 + 3:
5368 del document.body[iarg2 - 1 : endiarg2 + 4]
5370 del document.body[iarg2 : endiarg2 + 1]
# Choose the covington environment name (selection logic partly in the
# missing lines) and build the opening command as ERT.
5374 envname = "examples"
5376 envname = "subexamples"
5378 cmd = put_cmd_in_ert("\\begin{" + envname + "}[" + optargcontent + "]")
5380 # re-find end of layout
5381 j = find_end_of_layout(document.body, i)
5383 document.warning("Malformed LyX document: Can't find end of Subexample layout")
5387 # check for consecutive layouts
# For each follow-up paragraph of the same layout, turn it into a
# Standard paragraph starting with an ERT \item<...>[...].
5388 k = find_token(document.body, "\\begin_layout", l)
5389 if k == -1 or document.body[k] != layouttype:
5393 m = find_end_of_layout(document.body, k)
5394 iarg = find_token(document.body, "\\begin_inset Argument item:1", k, m)
5396 endiarg = find_end_of_inset(document.body, iarg)
5398 iargbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", iarg, endiarg)
5399 if iargbeginPlain == -1:
5400 document.warning("Malformed LyX document: Can't find optarg plain Layout")
5402 iargendPlain = find_end_of_inset(document.body, iargbeginPlain)
5403 subitemarg = "<" + lyx2latex(document, document.body[iargbeginPlain : iargendPlain]) + ">"
5405 iarg2 = find_token(document.body, "\\begin_inset Argument item:2", k, m)
5407 endiarg2 = find_end_of_inset(document.body, iarg2)
5409 iarg2beginPlain = find_token(document.body, "\\begin_layout Plain Layout", iarg2, endiarg2)
5410 if iarg2beginPlain == -1:
5411 document.warning("Malformed LyX document: Can't find optarg plain Layout")
5413 iarg2endPlain = find_end_of_inset(document.body, iarg2beginPlain)
5414 subitemarg += "[" + lyx2latex(document, document.body[iarg2beginPlain : iarg2endPlain]) + "]"
5416 if subitemarg == "":
5418 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item" + subitemarg)
5419 # Refind and remove arg insets
5421 iarg = find_token(document.body, "\\begin_inset Argument item:1", k, m)
5423 document.warning("Unable to re-find item:1 Argument")
5425 endiarg = find_end_of_inset(document.body, iarg)
5426 if document.body[iarg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, iarg - 1) == endiarg + 3:
5427 del document.body[iarg - 1 : endiarg + 4]
5429 del document.body[iarg : endiarg + 1]
5431 iarg2 = find_token(document.body, "\\begin_inset Argument item:2", k, m)
5433 document.warning("Unable to re-find item:2 Argument")
5435 endiarg2 = find_end_of_inset(document.body, iarg2)
5436 if document.body[iarg2 - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, iarg2 - 1) == endiarg2 + 3:
5437 del document.body[iarg2 - 1 : endiarg2 + 4]
5439 del document.body[iarg2 : endiarg2 + 1]
5441 document.body[k : k + 1] = ["\\begin_layout Standard"]
5442 l = find_end_of_layout(document.body, k)
5444 document.warning("Malformed LyX document: Can't find end of example layout")
# Close the environment after the last paragraph, then open it before
# the first one, prefixing the first \item.
5447 endev = put_cmd_in_ert("\\end{" + envname + "}")
5449 document.body[l : l] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
5450 document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
5451 + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item" + itemarg)
# The reverted LaTeX needs the covington package; record that in a
# local layout requirement.
5453 document.append_local_layout("Requires covington")
# NOTE(review): line-sampled listing -- gaps in the embedded original line
# numbers (5458, 5460, 5463, ...) mean some statements (loop guards,
# `break`/`continue` lines) are not visible here.
#
# Purpose: downgrade the new covington argument numbering to the old one:
# example layouts lose their "item:2" slot (renamed to "item:1"), and the
# Interlinear Gloss insets have their post-arguments renumbered
# (post:2/4/6 -> post:1/2/3) because the intermediate slots no longer
# exist in the older format.
5457 def revert_cov_options(document):
5458 """Revert examples item argument structure"""
# Only relevant for documents using the "linguistics" module.
5460 if "linguistics" not in document.get_module_list():
5463 layouts = ["Numbered Examples (consecutive)", "Subexample"]
# For each example layout found, rename its "item:2" argument inset to
# "item:1" within the layout's span [i, j].
5465 for layout in layouts:
5468 i = find_token(document.body, "\\begin_layout %s" % layout, i)
5471 j = find_end_of_layout(document.body, i)
5473 document.warning("Malformed LyX document: Can't find end of example layout at line %d" % i)
5476 k = find_token(document.body, '\\begin_inset Argument item:2', i, j)
5478 document.body[k] = '\\begin_inset Argument item:1'
5480 # Shift gloss arguments
# Two-line glosses: post:2 -> post:1 and post:4 -> post:2 inside each
# "Interlinear Gloss (2 Lines)" flex inset.
5483 i = find_token(document.body, "\\begin_inset Flex Interlinear Gloss (2 Lines)", i)
5486 j = find_end_of_inset(document.body, i)
5488 document.warning("Malformed LyX document: Can't find end of gloss inset at line %d" % i)
5491 k = find_token(document.body, '\\begin_inset Argument post:2', i, j)
5493 document.body[k] = '\\begin_inset Argument post:1'
5494 k = find_token(document.body, '\\begin_inset Argument post:4', i, j)
5496 document.body[k] = '\\begin_inset Argument post:2'
# Three-line glosses additionally map post:6 -> post:3.
5501 i = find_token(document.body, "\\begin_inset Flex Interlinear Gloss (3 Lines)", i)
5504 j = find_end_of_inset(document.body, i)
5506 document.warning("Malformed LyX document: Can't find end of gloss inset at line %d" % i)
5509 k = find_token(document.body, '\\begin_inset Argument post:2', i, j)
5511 document.body[k] = '\\begin_inset Argument post:1'
5512 k = find_token(document.body, '\\begin_inset Argument post:4', i, j)
5514 document.body[k] = '\\begin_inset Argument post:2'
5515 k = find_token(document.body, '\\begin_inset Argument post:6', i, j)
5517 document.body[k] = '\\begin_inset Argument post:3'
def revert_expreambles(document):
    """Revert covington example pre-/postamble flex insets to ERT.

    Each flex inset used by the linguistics module maps one-to-one onto a
    covington LaTeX command; the inset is replaced by that command in ERT.
    """
    inset_to_command = (
        ("Example Preamble", "\\expreamble"),
        ("Subexample Preamble", "\\subexpreamble"),
        ("Example Postamble", "\\expostamble"),
        ("Subexample Postamble", "\\subexpostamble"),
    )
    for flex_name, latex_command in inset_to_command:
        revert_flex_inset(document.body, flex_name, latex_command)
# NOTE(review): line-sampled listing -- the opening `convert = [` assignment
# (and many intermediate table rows) fall into sampling gaps, as does the
# closing bracket of the `revert` list.  Only the rows below are visible.
#
# lyx2lyx conversion tables: each row pairs a target file-format number with
# the list of step functions that perform that single format bump.  The
# `convert` chain runs oldest-to-newest when upgrading a document; the
# `revert` chain runs newest-to-oldest when downgrading.
5534 supported_versions = ["2.4.0", "2.4"]
5536 [545, [convert_lst_literalparam]],
5541 [550, [convert_fontenc]],
5548 [557, [convert_vcsinfo]],
5549 [558, [removeFrontMatterStyles]],
5552 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
5556 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
5557 [566, [convert_hebrew_parentheses]],
5563 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
5564 [573, [convert_inputencoding_namechange]],
5565 [574, [convert_ruby_module, convert_utf8_japanese]],
5566 [575, [convert_lineno, convert_aaencoding]],
5568 [577, [convert_linggloss]],
5572 [581, [convert_osf]],
5573 [582, [convert_AdobeFonts,convert_latexFonts,convert_notoFonts,convert_CantarellFont,convert_FiraFont]],# old fonts re-converted due to extra options
5574 [583, [convert_ChivoFont,convert_Semibolds,convert_NotoRegulars,convert_CrimsonProFont]],
5576 [585, [convert_pagesizes]],
5578 [587, [convert_pagesizenames]],
5580 [589, [convert_totalheight]],
5581 [590, [convert_changebars]],
5582 [591, [convert_postpone_fragile]],
5584 [593, [convert_counter_maintenance]],
5587 [596, [convert_parskip]],
5588 [597, [convert_libertinus_rm_fonts]],
5592 [601, [convert_math_refs]],
5593 [602, [convert_branch_colors]],
5596 [605, [convert_vcolumns2]],
5597 [606, [convert_koma_frontispiece]],
5603 [612, [convert_starred_refs]],
5605 [614, [convert_hyper_other]],
5606 [615, [convert_acknowledgment,convert_ack_theorems]],
5607 [616, [convert_empty_macro]],
5608 [617, [convert_cov_options]]
# Downgrade chain: a row [N, [steps]] reverts a format-(N+1) document to
# format N.  Step order within a row matters (see the "keep ... last!"
# note on the font row below).
5611 revert = [[616, [revert_expreambles,revert_exarg2,revert_linggloss2,revert_cov_options]],
5612 [615, [revert_empty_macro]],
5613 [614, [revert_ack_theorems,revert_acknowledgment]],
5614 [613, [revert_hyper_other]],
5615 [612, [revert_familydefault]],
5616 [611, [revert_starred_refs]],
5618 [609, [revert_index_macros]],
5619 [608, [revert_document_metadata]],
5620 [607, [revert_docbook_mathml_prefix]],
5621 [606, [revert_spellchecker_ignore]],
5622 [605, [revert_koma_frontispiece]],
5623 [604, [revert_vcolumns2]],
5624 [603, [revert_branch_darkcols]],
5625 [602, [revert_darkmode_graphics]],
5626 [601, [revert_branch_colors]],
5628 [599, [revert_math_refs]],
5629 [598, [revert_hrquotes]],
5630 [598, [revert_nopagebreak]],
5631 [597, [revert_docbook_table_output]],
5632 [596, [revert_libertinus_rm_fonts,revert_libertinus_sftt_fonts]],
5633 [595, [revert_parskip,revert_line_vspaces]],
5634 [594, [revert_ams_spaces]],
5635 [593, [revert_counter_inset]],
5636 [592, [revert_counter_maintenance]],
5637 [591, [revert_colrow_tracking]],
5638 [590, [revert_postpone_fragile]],
5639 [589, [revert_changebars]],
5640 [588, [revert_totalheight]],
5641 [587, [revert_memoir_endnotes,revert_enotez,revert_theendnotes]],
5642 [586, [revert_pagesizenames]],
5643 [585, [revert_dupqualicites]],
5644 [584, [revert_pagesizes,revert_komafontsizes]],
5645 [583, [revert_vcsinfo_rev_abbrev]],
5646 [582, [revert_ChivoFont,revert_CrimsonProFont]],
5647 [581, [revert_CantarellFont,revert_FiraFont]],
5648 [580, [revert_texfontopts,revert_osf]],
5649 [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
5650 [578, [revert_babelfont]],
5651 [577, [revert_drs]],
5652 [576, [revert_linggloss, revert_subexarg]],
5653 [575, [revert_new_languages]],
5654 [574, [revert_lineno, revert_aaencoding]],
5655 [573, [revert_ruby_module, revert_utf8_japanese]],
5656 [572, [revert_inputencoding_namechange]],
5657 [571, [revert_notoFonts]],
5658 [570, [revert_cmidruletrimming]],
5659 [569, [revert_bibfileencodings]],
5660 [568, [revert_tablestyle]],
5661 [567, [revert_soul]],
5662 [566, [revert_malayalam]],
5663 [565, [revert_hebrew_parentheses]],
5664 [564, [revert_AdobeFonts]],
5665 [563, [revert_lformatinfo]],
5666 [562, [revert_listpargs]],
5667 [561, [revert_l7ninfo]],
5668 [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
5669 [559, [revert_timeinfo, revert_namenoextinfo]],
5670 [558, [revert_dateinfo]],
5671 [557, [addFrontMatterStyles]],
5672 [556, [revert_vcsinfo]],
5673 [555, [revert_bibencoding]],
5674 [554, [revert_vcolumns]],
5675 [553, [revert_stretchcolumn]],
5676 [552, [revert_tuftecite]],
5677 [551, [revert_floatpclass, revert_floatalignment]],
5678 [550, [revert_nospellcheck]],
5679 [549, [revert_fontenc]],
5680 [548, []], # dummy format change
5681 [547, [revert_lscape]],
5682 [546, [revert_xcharter]],
5683 [545, [revert_paratype]],
5684 [544, [revert_lst_literalparam]]
5688 if __name__ == "__main__":