1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_token, find_end_of_inset,
30 find_end_of_layout, find_token, find_token_backwards, find_token_exact,
31 find_re, get_bool_value,
32 get_containing_layout, get_option_value, get_value, get_quoted_value)
33 # del_value, del_complete_lines,
34 # find_complete_lines, find_end_of,
35 # find_re, find_substring,
36 # get_containing_inset,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # revert_flex_inset, hex2ratio
46 ####################################################################
47 # Private helper functions
49 def add_preamble_fonts(document, fontmap):
50 " Add collected font-packages with their option to user-preamble"
53 if len(fontmap[pkg]) > 0:
54 xoption = "[" + ",".join(fontmap[pkg]) + "]"
57 preamble = "\\usepackage%s{%s}" % (xoption, pkg)
58 add_to_preamble(document, [preamble])
def createkey(pkg, options):
    """Return a lookup key of the form ``<package>:<opt1>-<opt2>-...``."""
    return ':'.join([pkg, "-".join(options)])
67 self.fontname = None # key into font2pkgmap
68 self.fonttype = None # roman,sans,typewriter,math
69 self.scaletype = None # None,sf,tt
70 self.scaleopt = None # None, 'scaled', 'scale'
74 self.pkgkey = None # key into pkg2fontmap
75 self.osfopt = None # None, string
76 self.osfdef = "false" # "false" or "true"
79 self.pkgkey = createkey(self.package, self.options)
83 self.font2pkgmap = dict()
84 self.pkg2fontmap = dict()
85 self.pkginmap = dict() # defines, if a map for package exists
87 def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None, osfdef = "false"):
88 " Expand fontinfo mapping"
90 # fontlist: list of fontnames, each element
91 # may contain a ','-separated list of needed options
92 # like e.g. 'IBMPlexSansCondensed,condensed'
93 # font_type: one of 'roman', 'sans', 'typewriter', 'math'
94 # scale_type: one of None, 'sf', 'tt'
95 # pkg: package defining the font. Defaults to fontname if None
96 # scaleopt: one of None, 'scale', 'scaled', or some other string
97 # to be used in scale option (e.g. scaled=0.7)
98 # osfopt: None or some other string to be used in osf option
99 # osfdef: "true" if osf is default
102 fe.fonttype = font_type
103 fe.scaletype = scale_type
106 fe.fontname = font_name
108 fe.scaleopt = scaleopt
112 fe.package = font_name
116 self.font2pkgmap[font_name] = fe
117 if fe.pkgkey in self.pkg2fontmap:
118 # Repeated the same entry? Check content
119 if self.pkg2fontmap[fe.pkgkey] != font_name:
120 document.error("Something is wrong in pkgname+options <-> fontname mapping")
121 self.pkg2fontmap[fe.pkgkey] = font_name
122 self.pkginmap[fe.package] = 1
124 def getfontname(self, pkg, options):
126 pkgkey = createkey(pkg, options)
127 if not pkgkey in self.pkg2fontmap:
129 fontname = self.pkg2fontmap[pkgkey]
130 if not fontname in self.font2pkgmap:
131 document.error("Something is wrong in pkgname+options <-> fontname mapping")
133 if pkgkey == self.font2pkgmap[fontname].pkgkey:
137 def createFontMapping(fontlist):
138 # Create info for known fonts for the use in
139 # convert_latexFonts() and
140 # revert_latexFonts()
142 # * Would be more handy to parse latexFonts file,
143 # but the path to this file is unknown
144 # * For now, add DejaVu and IBMPlex only.
145 # * Expand, if desired
147 for font in fontlist:
149 fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
150 fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
151 fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
153 fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
154 'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
155 'IBMPlexSerifSemibold,semibold'],
156 "roman", None, "plex-serif")
157 fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
158 'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
159 'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
160 "sans", "sf", "plex-sans", "scale")
161 fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
162 'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
163 'IBMPlexMonoSemibold,semibold'],
164 "typewriter", "tt", "plex-mono", "scale")
165 elif font == 'Adobe':
166 fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
167 fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
168 fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
170 fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
171 'NotoSerifThin,thin', 'NotoSerifLight,light',
172 'NotoSerifExtralight,extralight'],
173 "roman", None, "noto-serif", None, "osf")
174 fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
175 'NotoSansThin,thin', 'NotoSansLight,light',
176 'NotoSansExtralight,extralight'],
177 "sans", "sf", "noto-sans", "scaled")
178 fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
179 elif font == 'Cantarell':
180 fm.expandFontMapping(['cantarell,defaultsans'],
181 "sans", "sf", "cantarell", "scaled", "oldstyle")
182 elif font == 'Chivo':
183 fm.expandFontMapping(['ChivoThin,thin', 'ChivoLight,light',
184 'Chivo,regular', 'ChivoMedium,medium'],
185 "sans", "sf", "Chivo", "scale", "oldstyle")
186 elif font == 'CrimsonPro':
187 fm.expandFontMapping(['CrimsonPro', 'CrimsonProExtraLight,extralight', 'CrimsonProLight,light',
188 'CrimsonProMedium,medium'],
189 "roman", None, "CrimsonPro", None, "lf", "true")
191 fm.expandFontMapping(['FiraSans', 'FiraSansBook,book',
192 'FiraSansThin,thin', 'FiraSansLight,light',
193 'FiraSansExtralight,extralight',
194 'FiraSansUltralight,ultralight'],
195 "sans", "sf", "FiraSans", "scaled", "lf", "true")
196 fm.expandFontMapping(['FiraMono'], "typewriter", "tt", "FiraMono", "scaled", "lf", "true")
199 def convert_fonts(document, fm, osfoption = "osf"):
200 " Handle font definition (LaTeX preamble -> native) "
202 rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
203 rscaleopt = re.compile(r'^scaled?=(.*)')
205 # Check whether we go beyond font option feature introduction
206 haveFontOpts = document.end_format > 580
209 while i < len(document.preamble):
210 i = find_re(document.preamble, rpkg, i+1)
213 mo = rpkg.search(document.preamble[i])
214 if mo == None or mo.group(2) == None:
217 options = mo.group(2).replace(' ', '').split(",")
222 while o < len(options):
223 if options[o] == osfoption:
227 mo = rscaleopt.search(options[o])
235 if not pkg in fm.pkginmap:
240 # Try with name-option combination first
241 # (only one default option supported currently)
243 while o < len(options):
245 fn = fm.getfontname(pkg, [opt])
252 fn = fm.getfontname(pkg, [])
254 fn = fm.getfontname(pkg, options)
257 del document.preamble[i]
258 fontinfo = fm.font2pkgmap[fn]
259 if fontinfo.scaletype == None:
262 fontscale = "\\font_" + fontinfo.scaletype + "_scale"
263 fontinfo.scaleval = oscale
264 if (has_osf and fontinfo.osfdef == "false") or (not has_osf and fontinfo.osfdef == "true"):
265 if fontinfo.osfopt == None:
266 options.extend(osfoption)
268 osf = find_token(document.header, "\\font_osf false")
269 osftag = "\\font_osf"
270 if osf == -1 and fontinfo.fonttype != "math":
271 # Try with newer format
272 osftag = "\\font_" + fontinfo.fonttype + "_osf"
273 osf = find_token(document.header, osftag + " false")
275 document.header[osf] = osftag + " true"
276 if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
277 del document.preamble[i-1]
279 if fontscale != None:
280 j = find_token(document.header, fontscale, 0)
282 val = get_value(document.header, fontscale, j)
286 scale = "%03d" % int(float(oscale) * 100)
287 document.header[j] = fontscale + " " + scale + " " + vals[1]
288 ft = "\\font_" + fontinfo.fonttype
289 j = find_token(document.header, ft, 0)
291 val = get_value(document.header, ft, j)
292 words = val.split() # ! splits also values like '"DejaVu Sans"'
293 words[0] = '"' + fn + '"'
294 document.header[j] = ft + ' ' + ' '.join(words)
295 if haveFontOpts and fontinfo.fonttype != "math":
296 fotag = "\\font_" + fontinfo.fonttype + "_opts"
297 fo = find_token(document.header, fotag)
299 document.header[fo] = fotag + " \"" + ",".join(options) + "\""
301 # Sensible place to insert tag
302 fo = find_token(document.header, "\\font_sf_scale")
304 document.warning("Malformed LyX document! Missing \\font_sf_scale")
306 document.header.insert(fo, fotag + " \"" + ",".join(options) + "\"")
310 def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False, WithXOpts = False):
311 " Revert native font definition to LaTeX "
312 # fonlist := list of fonts created from the same package
313 # Empty package means that the font-name is the same as the package-name
314 # fontmap (key = package, val += found options) will be filled
315 # and used later in add_preamble_fonts() to be added to user-preamble
317 rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
318 rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
320 while i < len(document.header):
321 i = find_re(document.header, rfontscale, i+1)
324 mo = rfontscale.search(document.header[i])
327 ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
328 val = get_value(document.header, ft, i)
329 words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
330 font = words[0].strip('"') # TeX font name has no whitespace
331 if not font in fm.font2pkgmap:
333 fontinfo = fm.font2pkgmap[font]
334 val = fontinfo.package
335 if not val in fontmap:
338 if OnlyWithXOpts or WithXOpts:
339 if ft == "\\font_math":
341 regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
342 if ft == "\\font_sans":
343 regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
344 elif ft == "\\font_typewriter":
345 regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
346 x = find_re(document.header, regexp, 0)
347 if x == -1 and OnlyWithXOpts:
351 # We need to use this regex since split() does not handle quote protection
352 xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
353 opts = xopts[1].strip('"').split(",")
354 fontmap[val].extend(opts)
355 del document.header[x]
356 words[0] = '"default"'
357 document.header[i] = ft + ' ' + ' '.join(words)
358 if fontinfo.scaleopt != None:
359 xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
360 mo = rscales.search(xval)
365 # set correct scale option
366 fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
367 if fontinfo.osfopt != None:
369 if fontinfo.osfdef == "true":
371 osf = find_token(document.header, "\\font_osf " + oldval)
372 if osf == -1 and ft != "\\font_math":
373 # Try with newer format
374 osftag = "\\font_roman_osf " + oldval
375 if ft == "\\font_sans":
376 osftag = "\\font_sans_osf " + oldval
377 elif ft == "\\font_typewriter":
378 osftag = "\\font_typewriter_osf " + oldval
379 osf = find_token(document.header, osftag)
381 fontmap[val].extend([fontinfo.osfopt])
382 if len(fontinfo.options) > 0:
383 fontmap[val].extend(fontinfo.options)
386 ###############################################################################
388 ### Conversion and reversion routines
390 ###############################################################################
def convert_inputencoding_namechange(document):
    """Rename inputencoding settings to the new 2.4 names.

    "auto" becomes "auto-legacy" and "default" becomes "auto-legacy-plain".
    """
    i = find_token(document.header, "\\inputencoding", 0)
    # Guard against a missing \inputencoding header line: without this,
    # header[-1] (the last header line) would be rewritten by mistake.
    if i == -1:
        return
    s = document.header[i].replace("auto", "auto-legacy")
    document.header[i] = s.replace("default", "auto-legacy-plain")
def revert_inputencoding_namechange(document):
    """Rename inputencoding settings back to the pre-2.4 names.

    "auto-legacy-plain" becomes "default" and "auto-legacy" becomes "auto".
    """
    i = find_token(document.header, "\\inputencoding", 0)
    # Guard against a missing \inputencoding header line: without this,
    # header[-1] (the last header line) would be rewritten by mistake.
    if i == -1:
        return
    s = document.header[i].replace("auto-legacy-plain", "default")
    document.header[i] = s.replace("auto-legacy", "auto")
def convert_notoFonts(document):
    """Turn LaTeX preamble code for the Noto fonts into native settings."""
    using_tex_fonts = find_token(document.header, "\\use_non_tex_fonts false", 0) != -1
    if using_tex_fonts:
        convert_fonts(document, createFontMapping(['Noto']))
def revert_notoFonts(document):
    """Revert native Noto font settings to LaTeX preamble code.

    Package options collected by revert_fonts() are added to the user
    preamble via add_preamble_fonts().
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # package -> list of options; filled by revert_fonts() and
        # consumed by add_preamble_fonts(). Must be initialized here —
        # it was referenced without definition before.
        fontmap = dict()
        fm = createFontMapping(['Noto'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_latexFonts(document):
    """Turn LaTeX preamble code for the DejaVu and IBM Plex fonts into native settings."""
    using_tex_fonts = find_token(document.header, "\\use_non_tex_fonts false", 0) != -1
    if using_tex_fonts:
        convert_fonts(document, createFontMapping(['DejaVu', 'IBM']))
def revert_latexFonts(document):
    """Revert native DejaVu and IBM Plex font settings to LaTeX preamble code.

    Package options collected by revert_fonts() are added to the user
    preamble via add_preamble_fonts().
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # package -> list of options; filled by revert_fonts() and
        # consumed by add_preamble_fonts(). Must be initialized here —
        # it was referenced without definition before.
        fontmap = dict()
        fm = createFontMapping(['DejaVu', 'IBM'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
def convert_AdobeFonts(document):
    """Turn LaTeX preamble code for the Adobe Source fonts into native settings."""
    using_tex_fonts = find_token(document.header, "\\use_non_tex_fonts false", 0) != -1
    if using_tex_fonts:
        convert_fonts(document, createFontMapping(['Adobe']))
def revert_AdobeFonts(document):
    """Revert native Adobe Source font settings to LaTeX preamble code.

    Package options collected by revert_fonts() are added to the user
    preamble via add_preamble_fonts().
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # package -> list of options; filled by revert_fonts() and
        # consumed by add_preamble_fonts(). Must be initialized here —
        # it was referenced without definition before.
        fontmap = dict()
        fm = createFontMapping(['Adobe'])
        if revert_fonts(document, fm, fontmap):
            add_preamble_fonts(document, fontmap)
456 def removeFrontMatterStyles(document):
457 " Remove styles Begin/EndFrontmatter"
459 layouts = ['BeginFrontmatter', 'EndFrontmatter']
460 tokenend = len('\\begin_layout ')
463 i = find_token_exact(document.body, '\\begin_layout ', i+1)
466 layout = document.body[i][tokenend:].strip()
467 if layout not in layouts:
469 j = find_end_of_layout(document.body, i)
471 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
473 while document.body[j+1].strip() == '':
475 document.body[i:j+1] = []
477 def addFrontMatterStyles(document):
478 " Use styles Begin/EndFrontmatter for elsarticle"
480 if document.textclass != "elsarticle":
483 def insertFrontmatter(prefix, line):
485 while above > 0 and document.body[above-1].strip() == '':
488 while document.body[below].strip() == '':
490 document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
491 '\\begin_inset Note Note',
493 '\\begin_layout Plain Layout',
496 '\\end_inset', '', '',
499 layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
500 'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
501 tokenend = len('\\begin_layout ')
505 i = find_token_exact(document.body, '\\begin_layout ', i+1)
508 layout = document.body[i][tokenend:].strip()
509 if layout not in layouts:
511 k = find_end_of_layout(document.body, i)
513 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
520 insertFrontmatter('End', k+1)
521 insertFrontmatter('Begin', first)
524 def convert_lst_literalparam(document):
525 " Add param literal to include inset "
529 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
532 j = find_end_of_inset(document.body, i)
534 document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
536 while i < j and document.body[i].strip() != '':
538 document.body.insert(i, 'literal "true"')
541 def revert_lst_literalparam(document):
542 " Remove param literal from include inset "
546 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
549 j = find_end_of_inset(document.body, i)
551 document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
553 del_token(document.body, 'literal', i, j)
556 def revert_paratype(document):
557 " Revert ParaType font definitions to LaTeX "
559 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
561 i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
562 i2 = find_token(document.header, "\\font_sans \"default\"", 0)
563 i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
564 j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
567 sfval = find_token(document.header, "\\font_sf_scale", 0)
569 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
571 sfscale = document.header[sfval].split()
574 document.header[sfval] = " ".join(sfscale)
577 sf_scale = float(val)
579 document.warning("Invalid font_sf_scale value: " + val)
582 if sf_scale != "100.0":
583 sfoption = "scaled=" + str(sf_scale / 100.0)
584 k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
585 ttval = get_value(document.header, "\\font_tt_scale", 0)
590 ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
591 if i1 != -1 and i2 != -1 and i3!= -1:
592 add_to_preamble(document, ["\\usepackage{paratype}"])
595 add_to_preamble(document, ["\\usepackage{PTSerif}"])
596 document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
599 add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
601 add_to_preamble(document, ["\\usepackage{PTSans}"])
602 document.header[j] = document.header[j].replace("PTSans-TLF", "default")
605 add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
607 add_to_preamble(document, ["\\usepackage{PTMono}"])
608 document.header[k] = document.header[k].replace("PTMono-TLF", "default")
611 def revert_xcharter(document):
612 " Revert XCharter font definitions to LaTeX "
614 i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
618 # replace unsupported font setting
619 document.header[i] = document.header[i].replace("xcharter", "default")
620 # no need for preamble code with system fonts
621 if get_bool_value(document.header, "\\use_non_tex_fonts"):
624 # transfer old style figures setting to package options
625 j = find_token(document.header, "\\font_osf true")
628 document.header[j] = "\\font_osf false"
632 add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
635 def revert_lscape(document):
636 " Reverts the landscape environment (Landscape module) to TeX-code "
638 if not "landscape" in document.get_module_list():
643 i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
646 j = find_end_of_inset(document.body, i)
648 document.warning("Malformed LyX document: Can't find end of Landscape inset")
651 if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
652 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
653 document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
654 add_to_preamble(document, ["\\usepackage{afterpage}"])
656 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
657 document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
659 add_to_preamble(document, ["\\usepackage{pdflscape}"])
def convert_fontenc(document):
    """Convert the default fontenc setting ("global" -> "auto")."""
    i = find_token(document.header, "\\fontencoding global", 0)
    # Guard against a missing \fontencoding line: without this,
    # header[-1] (the last header line) would be rewritten by mistake.
    if i == -1:
        return
    document.header[i] = document.header[i].replace("global", "auto")
def revert_fontenc(document):
    """Revert the default fontenc setting ("auto" -> "global")."""
    i = find_token(document.header, "\\fontencoding auto", 0)
    # Guard against a missing \fontencoding line: without this,
    # header[-1] (the last header line) would be rewritten by mistake.
    if i == -1:
        return
    document.header[i] = document.header[i].replace("auto", "global")
682 def revert_nospellcheck(document):
683 " Remove nospellcheck font info param "
687 i = find_token(document.body, '\\nospellcheck', i)
693 def revert_floatpclass(document):
694 " Remove float placement params 'document' and 'class' "
696 del_token(document.header, "\\float_placement class")
700 i = find_token(document.body, '\\begin_inset Float', i+1)
703 j = find_end_of_inset(document.body, i)
704 k = find_token(document.body, 'placement class', i, i + 2)
706 k = find_token(document.body, 'placement document', i, i + 2)
713 def revert_floatalignment(document):
714 " Remove float alignment params "
716 galignment = get_value(document.header, "\\float_alignment", delete=True)
720 i = find_token(document.body, '\\begin_inset Float', i+1)
723 j = find_end_of_inset(document.body, i)
725 document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
727 k = find_token(document.body, 'alignment', i, i+4)
731 alignment = get_value(document.body, "alignment", k)
732 if alignment == "document":
733 alignment = galignment
735 l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
737 document.warning("Can't find float layout!")
740 if alignment == "left":
741 alcmd = put_cmd_in_ert("\\raggedright{}")
742 elif alignment == "center":
743 alcmd = put_cmd_in_ert("\\centering{}")
744 elif alignment == "right":
745 alcmd = put_cmd_in_ert("\\raggedleft{}")
747 document.body[l+1:l+1] = alcmd
750 def revert_tuftecite(document):
751 " Revert \cite commands in tufte classes "
753 tufte = ["tufte-book", "tufte-handout"]
754 if document.textclass not in tufte:
759 i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
762 j = find_end_of_inset(document.body, i)
764 document.warning("Can't find end of citation inset at line %d!!" %(i))
766 k = find_token(document.body, "LatexCommand", i, j)
768 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
771 cmd = get_value(document.body, "LatexCommand", k)
775 pre = get_quoted_value(document.body, "before", i, j)
776 post = get_quoted_value(document.body, "after", i, j)
777 key = get_quoted_value(document.body, "key", i, j)
779 document.warning("Citation inset at line %d does not have a key!" %(i))
781 # Replace command with ERT
784 res += "[" + pre + "]"
786 res += "[" + post + "]"
789 res += "{" + key + "}"
790 document.body[i:j+1] = put_cmd_in_ert([res])
794 def revert_stretchcolumn(document):
795 " We remove the column varwidth flags or everything else will become a mess. "
798 i = find_token(document.body, "\\begin_inset Tabular", i+1)
801 j = find_end_of_inset(document.body, i+1)
803 document.warning("Malformed LyX document: Could not find end of tabular.")
805 for k in range(i, j):
806 if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
807 document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
808 document.body[k] = document.body[k].replace(' varwidth="true"', '')
811 def revert_vcolumns(document):
812 " Revert standard columns with line breaks etc. "
818 i = find_token(document.body, "\\begin_inset Tabular", i+1)
821 j = find_end_of_inset(document.body, i)
823 document.warning("Malformed LyX document: Could not find end of tabular.")
826 # Collect necessary column information
828 nrows = int(document.body[i+1].split('"')[3])
829 ncols = int(document.body[i+1].split('"')[5])
831 for k in range(ncols):
832 m = find_token(document.body, "<column", m)
833 width = get_option_value(document.body[m], 'width')
834 varwidth = get_option_value(document.body[m], 'varwidth')
835 alignment = get_option_value(document.body[m], 'alignment')
836 special = get_option_value(document.body[m], 'special')
837 col_info.append([width, varwidth, alignment, special, m])
842 for row in range(nrows):
843 for col in range(ncols):
844 m = find_token(document.body, "<cell", m)
845 multicolumn = get_option_value(document.body[m], 'multicolumn')
846 multirow = get_option_value(document.body[m], 'multirow')
847 width = get_option_value(document.body[m], 'width')
848 rotate = get_option_value(document.body[m], 'rotate')
849 # Check for: linebreaks, multipars, non-standard environments
851 endcell = find_token(document.body, "</cell>", begcell)
853 if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
855 elif count_pars_in_inset(document.body, begcell + 2) > 1:
857 elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
859 if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
860 if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
862 alignment = col_info[col][2]
863 col_line = col_info[col][4]
865 if alignment == "center":
866 vval = ">{\\centering}"
867 elif alignment == "left":
868 vval = ">{\\raggedright}"
869 elif alignment == "right":
870 vval = ">{\\raggedleft}"
873 vval += "V{\\linewidth}"
875 document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
876 # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
877 # with newlines, and we do not want that)
879 endcell = find_token(document.body, "</cell>", begcell)
881 nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
883 nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
887 nle = find_end_of_inset(document.body, nl)
888 del(document.body[nle:nle+1])
890 document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
892 document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
898 if needarray == True:
899 add_to_preamble(document, ["\\usepackage{array}"])
900 if needvarwidth == True:
901 add_to_preamble(document, ["\\usepackage{varwidth}"])
904 def revert_bibencoding(document):
905 " Revert bibliography encoding "
909 i = find_token(document.header, "\\cite_engine", 0)
911 document.warning("Malformed document! Missing \\cite_engine")
913 engine = get_value(document.header, "\\cite_engine", i)
917 if engine in ["biblatex", "biblatex-natbib"]:
920 # Map lyx to latex encoding names
924 "armscii8" : "armscii8",
925 "iso8859-1" : "latin1",
926 "iso8859-2" : "latin2",
927 "iso8859-3" : "latin3",
928 "iso8859-4" : "latin4",
929 "iso8859-5" : "iso88595",
930 "iso8859-6" : "8859-6",
931 "iso8859-7" : "iso-8859-7",
932 "iso8859-8" : "8859-8",
933 "iso8859-9" : "latin5",
934 "iso8859-13" : "latin7",
935 "iso8859-15" : "latin9",
936 "iso8859-16" : "latin10",
937 "applemac" : "applemac",
939 "cp437de" : "cp437de",
956 "utf8-platex" : "utf8",
963 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
966 j = find_end_of_inset(document.body, i)
968 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
970 encoding = get_quoted_value(document.body, "encoding", i, j)
973 # remove encoding line
974 k = find_token(document.body, "encoding", i, j)
977 if encoding == "default":
979 # Re-find inset end line
980 j = find_end_of_inset(document.body, i)
983 h = find_token(document.header, "\\biblio_options", 0)
985 biblio_options = get_value(document.header, "\\biblio_options", h)
986 if not "bibencoding" in biblio_options:
987 document.header[h] += ",bibencoding=%s" % encodings[encoding]
989 bs = find_token(document.header, "\\biblatex_bibstyle", 0)
991 # this should not happen
992 document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
994 document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
996 document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
997 document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
1003 def convert_vcsinfo(document):
1004 " Separate vcs Info inset from buffer Info inset. "
1007 "vcs-revision" : "revision",
1008 "vcs-tree-revision" : "tree-revision",
1009 "vcs-author" : "author",
1010 "vcs-time" : "time",
1015 i = find_token(document.body, "\\begin_inset Info", i+1)
1018 j = find_end_of_inset(document.body, i+1)
1020 document.warning("Malformed LyX document: Could not find end of Info inset.")
1022 tp = find_token(document.body, 'type', i, j)
1023 tpv = get_quoted_value(document.body, "type", tp)
1026 arg = find_token(document.body, 'arg', i, j)
1027 argv = get_quoted_value(document.body, "arg", arg)
1028 if argv not in list(types.keys()):
1030 document.body[tp] = "type \"vcs\""
1031 document.body[arg] = "arg \"" + types[argv] + "\""
1034 def revert_vcsinfo(document):
1035 " Merge vcs Info inset to buffer Info inset. "
1037 args = ["revision", "tree-revision", "author", "time", "date" ]
1040 i = find_token(document.body, "\\begin_inset Info", i+1)
1043 j = find_end_of_inset(document.body, i+1)
1045 document.warning("Malformed LyX document: Could not find end of Info inset.")
1047 tp = find_token(document.body, 'type', i, j)
1048 tpv = get_quoted_value(document.body, "type", tp)
1051 arg = find_token(document.body, 'arg', i, j)
1052 argv = get_quoted_value(document.body, "arg", arg)
1053 if argv not in args:
1054 document.warning("Malformed Info inset. Invalid vcs arg.")
1056 document.body[tp] = "type \"buffer\""
1057 document.body[arg] = "arg \"vcs-" + argv + "\""
1060 def revert_dateinfo(document):
1061 " Revert date info insets to static text. "
1063 # FIXME This currently only considers the main language and uses the system locale
1064 # Ideally, it should honor context languages and switch the locale accordingly.
1066 # The date formats for each language using strftime syntax:
1067 # long, short, loclong, locmedium, locshort
1069 "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1070 "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1071 "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1072 "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1073 "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1074 "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
1075 "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
1076 "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
1077 "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
1078 "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1079 "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1080 "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1081 "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1082 "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
1083 "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1084 "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
1085 "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
1086 "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1087 "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1088 "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1089 "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
1090 "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1091 "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
1092 "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
1093 "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
1094 "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1095 "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
1096 "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
1097 "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1098 "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
1099 "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1100 "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1101 "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
1102 "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1103 "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1104 "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1105 "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1106 "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
1107 "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
1108 "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1109 "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1110 "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1111 "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1112 "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1113 "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1114 "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1115 "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1116 "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
1117 "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1118 "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
1119 "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1120 "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1121 "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
1122 "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
1123 "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1124 "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
1125 "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
1126 "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1127 "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1128 "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
1129 "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
1130 "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1131 "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1132 "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
1133 "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1134 "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1135 "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
1136 "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1137 "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1138 "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1139 "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1140 "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1141 "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1142 "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1143 "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1144 "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1145 "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
1146 "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1147 "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1148 "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
1149 "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
1150 "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1151 "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1152 "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1153 "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1154 "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1155 "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1156 "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B %de %Y", "%d %b %Y", "%d/%m/%Y"],
1157 "spanish-mexico" : ["%A, %d de %B %de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
1158 "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1159 "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1160 "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1161 "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1162 "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1163 "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1164 "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
1165 "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
1166 "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
1167 "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1168 "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
1169 "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
1170 "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1173 types = ["date", "fixdate", "moddate" ]
1174 lang = get_value(document.header, "\\language")
1176 document.warning("Malformed LyX document! No \\language header found!")
1181 i = find_token(document.body, "\\begin_inset Info", i+1)
1184 j = find_end_of_inset(document.body, i+1)
1186 document.warning("Malformed LyX document: Could not find end of Info inset.")
1188 tp = find_token(document.body, 'type', i, j)
1189 tpv = get_quoted_value(document.body, "type", tp)
1190 if tpv not in types:
1192 arg = find_token(document.body, 'arg', i, j)
1193 argv = get_quoted_value(document.body, "arg", arg)
1196 if tpv == "fixdate":
1197 datecomps = argv.split('@')
1198 if len(datecomps) > 1:
1200 isodate = datecomps[1]
1201 m = re.search('(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
1203 dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
1204 # FIXME if we had the path to the original document (not the one in the tmp dir),
1205 # we could use the mtime.
1206 # elif tpv == "moddate":
1207 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
1210 result = dte.isodate()
1211 elif argv == "long":
1212 result = dte.strftime(dateformats[lang][0])
1213 elif argv == "short":
1214 result = dte.strftime(dateformats[lang][1])
1215 elif argv == "loclong":
1216 result = dte.strftime(dateformats[lang][2])
1217 elif argv == "locmedium":
1218 result = dte.strftime(dateformats[lang][3])
1219 elif argv == "locshort":
1220 result = dte.strftime(dateformats[lang][4])
1222 fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
1223 fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
1224 fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
1225 fmt = re.sub('[^\'%]d', '%d', fmt)
1226 fmt = fmt.replace("'", "")
1227 result = dte.strftime(fmt)
1228 if sys.version_info < (3,0):
1229 # In Python 2, datetime module works with binary strings,
1230 # our dateformat strings are utf8-encoded:
1231 result = result.decode('utf-8')
1232 document.body[i : j+1] = [result]
def revert_timeinfo(document):
    """Revert time Info insets to static text.

    FIXME This currently only considers the main language and uses the system locale
    Ideally, it should honor context languages and switch the locale accordingly.
    Also, the time object is "naive", i.e., it does not know of timezones (%Z will
    expand to the empty string then).

    NOTE(review): several control-flow lines of this function were lost in the
    source listing; they have been restored to match the surrounding patterns
    (the sibling Info-inset revert routines in this file) — confirm against VCS.
    """

    # The time formats for each language using strftime syntax:
    # long, short
    timeformats = {
        "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
        "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
        "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "armenian" : ["%H:%M:%S %Z", "%H:%M"],
        "asturian" : ["%H:%M:%S %Z", "%H:%M"],
        "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "austrian" : ["%H:%M:%S %Z", "%H:%M"],
        "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
        "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
        "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
        "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
        "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
        "breton" : ["%H:%M:%S %Z", "%H:%M"],
        "british" : ["%H:%M:%S %Z", "%H:%M"],
        "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
        "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "canadien" : ["%H:%M:%S %Z", "%H h %M"],
        "catalan" : ["%H:%M:%S %Z", "%H:%M"],
        "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
        "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
        "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
        "czech" : ["%H:%M:%S %Z", "%H:%M"],
        "danish" : ["%H.%M.%S %Z", "%H.%M"],
        "divehi" : ["%H:%M:%S %Z", "%H:%M"],
        "dutch" : ["%H:%M:%S %Z", "%H:%M"],
        "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "estonian" : ["%H:%M:%S %Z", "%H:%M"],
        "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
        "finnish" : ["%H.%M.%S %Z", "%H.%M"],
        "french" : ["%H:%M:%S %Z", "%H:%M"],
        "friulan" : ["%H:%M:%S %Z", "%H:%M"],
        "galician" : ["%H:%M:%S %Z", "%H:%M"],
        "georgian" : ["%H:%M:%S %Z", "%H:%M"],
        "german" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
        "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
        "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
        "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
        "irish" : ["%H:%M:%S %Z", "%H:%M"],
        "italian" : ["%H:%M:%S %Z", "%H:%M"],
        "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
        "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
        "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
        "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "latvian" : ["%H:%M:%S %Z", "%H:%M"],
        "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
        "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
        "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
        "magyar" : ["%H:%M:%S %Z", "%H:%M"],
        "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
        "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
        "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
        "norsk" : ["%H:%M:%S %Z", "%H:%M"],
        "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "occitan" : ["%H:%M:%S %Z", "%H:%M"],
        "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "polish" : ["%H:%M:%S %Z", "%H:%M"],
        "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
        "romanian" : ["%H:%M:%S %Z", "%H:%M"],
        "romansh" : ["%H:%M:%S %Z", "%H:%M"],
        "russian" : ["%H:%M:%S %Z", "%H:%M"],
        "samin" : ["%H:%M:%S %Z", "%H:%M"],
        "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
        "scottish" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
        "slovak" : ["%H:%M:%S %Z", "%H:%M"],
        "slovene" : ["%H:%M:%S %Z", "%H:%M"],
        "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
        "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
        "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "syriac" : ["%H:%M:%S %Z", "%H:%M"],
        "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
        "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "turkish" : ["%H:%M:%S %Z", "%H:%M"],
        "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
        "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
        "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
        "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
        "welsh" : ["%H:%M:%S %Z", "%H:%M"]
        }

    types = ["time", "fixtime", "modtime" ]
    i = find_token(document.header, "\\language", 0)
    if i == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
        return
    lang = get_value(document.header, "\\language", i)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # Default to the current time; "fixtime" insets carry an ISO time
        # after an '@' separator.
        isotime = ""
        dtme = datetime.now()
        tme = dtme.time()
        if tpv == "fixtime":
            timecomps = argv.split('@')
            if len(timecomps) > 1:
                argv = timecomps[0]
                isotime = timecomps[1]
                m = re.search(r'(\d\d):(\d\d):(\d\d)', isotime)
                if m:
                    tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
                else:
                    m = re.search(r'(\d\d):(\d\d)', isotime)
                    if m:
                        tme = time(int(m.group(1)), int(m.group(2)))
        # FIXME if we had the path to the original document (not the one in the tmp dir),
        # we could use the mtime.
        # elif tpv == "moddate":
        #     dte = date.fromtimestamp(os.path.getmtime(document.dir))
        if argv == "ISO":
            result = tme.isoformat()
        elif argv == "long":
            result = tme.strftime(timeformats[lang][0])
        elif argv == "short":
            result = tme.strftime(timeformats[lang][1])
        else:
            # Translate a Qt time format string to strftime syntax.
            # BUGFIX: replacing "HH" with "%H" and then "H" with "%H" turned
            # the freshly inserted "%H" into "%%H"; substitute single "H"
            # only when not already preceded by '%'.
            fmt = argv.replace("HH", "%H")
            fmt = re.sub(r"(?<!%)H", "%H", fmt)
            fmt = fmt.replace("hh", "%I").replace("h", "%I")
            fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
            fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
            fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
            fmt = fmt.replace("'", "")
            # BUGFIX: was dte.strftime(fmt) — 'dte' is undefined in this
            # function; the time object is 'tme'.
            result = tme.strftime(fmt)
        # BUGFIX: assign a one-element list; assigning the bare string would
        # splice it into the body character by character.
        document.body[i : j+1] = [result]
def revert_namenoextinfo(document):
    """Merge buffer Info inset type name-noext to name.

    NOTE(review): the loop header and the -1/type guards were missing from the
    source listing and have been restored to match the sibling Info-inset
    routines — confirm against VCS.
    """

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        # only "buffer" Info insets have a name-noext argument
        if tpv != "buffer":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "name-noext":
            continue
        document.body[arg] = "arg \"name\""
def revert_l7ninfo(document):
    """Revert l7n Info inset to plain text.

    NOTE(review): the loop header and -1/type guards were missing from the
    source listing and have been restored to match the sibling Info-inset
    routines — confirm against VCS.
    """

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "l7n":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
        argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
        # BUGFIX: assign a one-element list; assigning the bare string would
        # splice the text into the body character by character.
        document.body[i : j+1] = [argv]
def revert_listpargs(document):
    """Revert listpreamble arguments to TeX-code (an ERT inset at the start
    of the containing paragraph).

    NOTE(review): the loop header, -1 guards and the 'parbeg' binding were
    missing from the source listing and have been restored — confirm
    against VCS.
    """

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i)
        # Find containing paragraph layout
        parent = get_containing_layout(document.body, i)
        if parent == False:
            document.warning("Malformed LyX document: Can't find parent paragraph layout")
            continue
        # start of the paragraph's content
        parbeg = parent[3]
        beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
        endPlain = find_end_of_layout(document.body, beginPlain)
        content = document.body[beginPlain + 1 : endPlain]
        del document.body[i:j+1]
        subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
                 "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
        document.body[parbeg : parbeg] = subst
def revert_lformatinfo(document):
    """Revert layout format Info inset to static text.

    NOTE(review): the loop header and -1 guards were missing from the source
    listing and have been restored to match the sibling Info-inset routines
    — confirm against VCS.
    """

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "lyxinfo":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "layoutformat":
            continue
        # hardcoded layout format value
        # BUGFIX: assign a one-element list; assigning the bare string "69"
        # would splice in two body lines "6" and "9".
        document.body[i : j+1] = ["69"]
def convert_hebrew_parentheses(document):
    """Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.
    """
    # print("convert hebrew parentheses")
    # Stack of active languages, one entry per nested layout.
    current_languages = [document.language]
    for i, line in enumerate(document.body):
        if line.startswith('\\lang '):
            # BUGFIX: str.lstrip('\\lang ') strips a *set of characters*, not
            # a prefix, and would mangle language names starting with one of
            # the characters \, l, a, n, g (e.g. "ngerman" -> "erman").
            # Slice off the literal prefix instead.
            current_languages[-1] = line[len('\\lang '):]
        elif line.startswith('\\begin_layout'):
            # inherit the language of the enclosing layout
            current_languages.append(current_languages[-1])
            # print (line, current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            # swap '(' and ')' via a NUL placeholder
            document.body[i] = line.replace('(','\x00').replace(')','(').replace('\x00',')')
def revert_hebrew_parentheses(document):
    """Store parentheses in Hebrew text reversed.

    The swap is its own inverse, so this simply delegates to the converter;
    it only exists to keep the convert/revert naming convention.
    """
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    """Set the document language to English but assure Malayalam output."""
    revert_language(document, "malayalam", "", "malayalam")
def revert_soul(document):
    """Revert soul module flex insets to ERT.

    NOTE(review): the 'for' loop and the 'if i != -1:' guards were missing
    from the source listing and have been restored — confirm against VCS.
    """

    flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]

    # \usepackage{soul} is needed as soon as any of the insets occurs
    for flex in flexes:
        i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
        if i != -1:
            add_to_preamble(document, ["\\usepackage{soul}"])
            break
    # Highlight additionally needs the color package
    i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
    if i != -1:
        add_to_preamble(document, ["\\usepackage{color}"])

    revert_flex_inset(document.body, "Spaceletters", "\\so")
    revert_flex_inset(document.body, "Strikethrough", "\\st")
    revert_flex_inset(document.body, "Underline", "\\ul")
    revert_flex_inset(document.body, "Highlight", "\\hl")
    revert_flex_inset(document.body, "Capitalize", "\\caps")
def revert_tablestyle(document):
    """Remove the \\tablestyle header parameter.

    NOTE(review): the 'if i != -1:' guard was missing from the source listing
    and has been restored — confirm against VCS.
    """
    i = find_token(document.header, "\\tablestyle")
    if i != -1:
        del document.header[i]
# NOTE(review): this listing is truncated — the encoding map below is missing
# several entries (original lines 1599-1607, 1616, 1618-1623) as well as the
# loop headers and -1 guards; do not treat this fragment as complete code.
1565 def revert_bibfileencodings(document):
1566 " Revert individual Biblatex bibliography encodings "
1570 i = find_token(document.header, "\\cite_engine", 0)
1572 document.warning("Malformed document! Missing \\cite_engine")
1574 engine = get_value(document.header, "\\cite_engine", i)
# Only Biblatex engines support per-file encodings.
1578 if engine in ["biblatex", "biblatex-natbib"]:
1581 # Map lyx to latex encoding names
1585 "armscii8" : "armscii8",
1586 "iso8859-1" : "latin1",
1587 "iso8859-2" : "latin2",
1588 "iso8859-3" : "latin3",
1589 "iso8859-4" : "latin4",
1590 "iso8859-5" : "iso88595",
1591 "iso8859-6" : "8859-6",
1592 "iso8859-7" : "iso-8859-7",
1593 "iso8859-8" : "8859-8",
1594 "iso8859-9" : "latin5",
1595 "iso8859-13" : "latin7",
1596 "iso8859-15" : "latin9",
1597 "iso8859-16" : "latin10",
1598 "applemac" : "applemac",
1600 "cp437de" : "cp437de",
1608 "cp1250" : "cp1250",
1609 "cp1251" : "cp1251",
1610 "cp1252" : "cp1252",
1611 "cp1255" : "cp1255",
1612 "cp1256" : "cp1256",
1613 "cp1257" : "cp1257",
1614 "koi8-r" : "koi8-r",
1615 "koi8-u" : "koi8-u",
1617 "utf8-platex" : "utf8",
# Scan all bibtex command insets for per-file encodings.
1624 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
1627 j = find_end_of_inset(document.body, i)
1629 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1631 encodings = get_quoted_value(document.body, "file_encodings", i, j)
1635 bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
1636 opts = get_quoted_value(document.body, "biblatexopts", i, j)
1637 if len(bibfiles) == 0:
1638 document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
1639 # remove encoding line
1640 k = find_token(document.body, "file_encodings", i, j)
1642 del document.body[k]
1643 # Re-find inset end line
1644 j = find_end_of_inset(document.body, i)
# "file_encodings" is a tab-separated list of "<file> <encoding>" pairs.
1646 enclist = encodings.split("\t")
1649 ppp = pp.split(" ", 1)
1650 encmap[ppp[0]] = ppp[1]
# Emit one \addbibresource per file, with bibencoding where known.
1651 for bib in bibfiles:
1652 pr = "\\addbibresource"
1653 if bib in encmap.keys():
1654 pr += "[bibencoding=" + encmap[bib] + "]"
1655 pr += "{" + bib + "}"
1656 add_to_preamble(document, [pr])
1657 # Insert ERT \\printbibliography and wrap bibtex inset to a Note
1658 pcmd = "printbibliography"
1660 pcmd += "[" + opts + "]"
1661 repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
1662 "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
1663 "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
1664 "status open", "", "\\begin_layout Plain Layout" ]
1665 repl += document.body[i:j+1]
1666 repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
1667 document.body[i:j+1] = repl
def revert_cmidruletrimming(document):
    """Remove \\cmidrule trimming attributes from table cells.

    NOTE(review): the loop header and -1 guards were missing from the source
    listing and have been restored — confirm against VCS.
    """

    # FIXME: Revert to TeX code?
    i = 0
    while True:
        # first, let's find out if we need to do anything
        i = find_token(document.body, '<cell ', i+1)
        if i == -1:
            return
        j = document.body[i].find('trim="')
        if j == -1:
            continue
        rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
        # remove trim option
        document.body[i] = rgx.sub('', document.body[i])
# NOTE(review): this listing is truncated — the 'ruby_inset_def = [' opener
# (original line 1691) and several entries/closers (1696-1698, 1701, 1704,
# 1711, 1713, 1719-1729) are missing; this is a fragment of the local-layout
# definition for the Ruby flex inset used by convert/revert_ruby_module.
1692 r'### Inserted by lyx2lyx (ruby inset) ###',
1693 r'InsetLayout Flex:Ruby',
1694 r' LyxType charstyle',
1695 r' LatexType command',
1699 r' HTMLInnerTag rb',
1700 r' HTMLInnerAttr ""',
1702 r' LabelString "Ruby"',
1703 r' Decoration Conglomerate',
1705 r' \ifdefined\kanjiskip',
1706 r' \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
1707 r' \else \ifdefined\luatexversion',
1708 r' \usepackage{luatexja-ruby}',
1709 r' \else \ifdefined\XeTeXversion',
1710 r' \usepackage{ruby}%',
1712 r' \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
1714 r' Argument post:1',
1715 r' LabelString "ruby text"',
1716 r' MenuString "Ruby Text|R"',
1717 r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
1718 r' Decoration Conglomerate',
def convert_ruby_module(document):
    """Use the ruby module instead of a local module definition."""
    # Only add the module if the local layout copy was actually present
    # (and has now been removed).
    if not document.del_local_layout(ruby_inset_def):
        return
    document.add_module("ruby")
def revert_ruby_module(document):
    """Replace the ruby module with a local module definition."""
    # Only insert the local layout if the module was actually in use
    # (and has now been removed).
    if not document.del_module("ruby"):
        return
    document.append_local_layout(ruby_inset_def)
def convert_utf8_japanese(document):
    """Use generic utf8 with Japanese documents.

    NOTE(review): the early 'return' after the language check was missing
    from the source listing and has been restored — confirm against VCS.
    """
    lang = get_value(document.header, "\\language")
    if not lang.startswith("japanese"):
        return
    inputenc = get_value(document.header, "\\inputencoding")
    if ((lang == "japanese" and inputenc == "utf8-platex")
        or (lang == "japanese-cjk" and inputenc == "utf8-cjk")):
        document.set_parameter("inputencoding", "utf8")
def revert_utf8_japanese(document):
    """Use Japanese utf8 variants with Japanese documents.

    NOTE(review): the early 'return' after the encoding check was missing
    from the source listing and has been restored — confirm against VCS.
    """
    inputenc = get_value(document.header, "\\inputencoding")
    if inputenc != "utf8":
        return
    lang = get_value(document.header, "\\language")
    if lang == "japanese":
        document.set_parameter("inputencoding", "utf8-platex")
    if lang == "japanese-cjk":
        document.set_parameter("inputencoding", "utf8-cjk")
def revert_lineno(document):
    """Replace lineno setting with user-preamble code.

    NOTE(review): the continuation of the get_quoted_value call, the empty
    options guard and the \\linenumbers preamble line were missing from the
    source listing and have been restored — confirm against VCS.
    """

    # read and delete both header parameters
    options = get_quoted_value(document.header, "\\lineno_options",
                               delete=True)
    if not get_bool_value(document.header, "\\use_lineno", delete=True):
        return
    if options:
        options = "[" + options + "]"
    add_to_preamble(document, ["\\usepackage%s{lineno}" % options,
                               "\\linenumbers"])
def convert_lineno(document):
    """Replace user-preamble code with native lineno support.

    NOTE(review): the initializations and guards were missing from the source
    listing and have been restored — confirm against VCS.
    """
    use_lineno = 0
    options = ""
    i = find_token(document.preamble, "\\linenumbers", 1)
    if i > -1:
        # the \usepackage line added by revert_lineno sits right above
        # the \linenumbers line
        usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
        if usepkg:
            use_lineno = 1
            options = usepkg.group(1).strip("[]")
            del(document.preamble[i-1:i+1])
            del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)

    # insert the new header parameters before \index
    k = find_token(document.header, "\\index ")
    if options == "":
        document.header[k:k] = ["\\use_lineno %d" % use_lineno]
    else:
        document.header[k:k] = ["\\use_lineno %d" % use_lineno,
                                "\\lineno_options %s" % options]
def revert_new_languages(document):
    """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
    and Russian (Petrine orthography).

    NOTE(review): the loop header and guards were missing from the source
    listing and have been restored — confirm against VCS.
    """

    # lyxname: (babelname, polyglossianame)
    new_languages = {"azerbaijani": ("azerbaijani", ""),
                     "bengali": ("", "bengali"),
                     "churchslavonic": ("", "churchslavonic"),
                     "oldrussian": ("", "russian"),
                     "korean": ("", "korean"),
                    }
    used_languages = set()
    if document.language in new_languages:
        used_languages.add(document.language)
    i = 0
    while True:
        i = find_token(document.body, "\\lang", i+1)
        if i == -1:
            break
        lyxname = document.body[i][6:].strip()
        if lyxname in new_languages:
            # BUGFIX: record the language actually found in the body;
            # the original added document.language here.
            used_languages.add(lyxname)

    # Korean is already supported via CJK, so leave as-is for Babel
    if ("korean" in used_languages
        and get_bool_value(document.header, "\\use_non_tex_fonts")
        and get_value(document.header, "\\language_package") in ("default", "auto")):
        revert_language(document, "korean", "", "korean")
        used_languages.discard("korean")

    for lang in used_languages:
        # BUGFIX: the original called undefined 'revert(lang, ...)';
        # the helper is revert_language(document, lyxname, babel, polyglossia).
        revert_language(document, lang, *new_languages[lang])
# NOTE(review): this listing is truncated — the 'gloss_inset_def = [' opener
# (original line 1829) and several entries/closers (1832, 1838-1841, 1847-1848,
# 1852, 1854-1855, 1857, 1862-1865) are missing; this is a fragment of the
# local-layout definition for the deprecated Glosse inset used by
# convert/revert_linggloss.
1830 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1831 r'InsetLayout Flex:Glosse',
1833 r' LabelString "Gloss (old version)"',
1834 r' MenuString "Gloss (old version)"',
1835 r' LatexType environment',
1836 r' LatexName linggloss',
1837 r' Decoration minimalistic',
1842 r' CustomPars false',
1843 r' ForcePlain true',
1844 r' ParbreakIsNewline true',
1845 r' FreeSpacing true',
1846 r' Requires covington',
1849 r' \@ifundefined{linggloss}{%',
1850 r' \newenvironment{linggloss}[2][]{',
1851 r' \def\glosstr{\glt #1}%',
1853 r' {\glosstr\glend}}{}',
1856 r' ResetsFont true',
1858 r' Decoration conglomerate',
1859 r' LabelString "Translation"',
1860 r' MenuString "Glosse Translation|s"',
1861 r' Tooltip "Add a translation for the glosse"',
# NOTE(review): this listing is truncated — several entries and the closing
# bracket (original lines 1869, 1875-1878, 1883, 1885-1886, 1890, 1892, 1894,
# 1899-1902) are missing; this is the local-layout definition for the
# deprecated Tri-Glosse inset used by convert/revert_linggloss.
1866 glosss_inset_def = [
1867 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1868 r'InsetLayout Flex:Tri-Glosse',
1870 r' LabelString "Tri-Gloss (old version)"',
1871 r' MenuString "Tri-Gloss (old version)"',
1872 r' LatexType environment',
1873 r' LatexName lingglosss',
1874 r' Decoration minimalistic',
1879 r' CustomPars false',
1880 r' ForcePlain true',
1881 r' ParbreakIsNewline true',
1882 r' FreeSpacing true',
1884 r' Requires covington',
1887 r' \@ifundefined{lingglosss}{%',
1888 r' \newenvironment{lingglosss}[2][]{',
1889 r' \def\glosstr{\glt #1}%',
1891 r' {\glosstr\glend}}{}',
1893 r' ResetsFont true',
1895 r' Decoration conglomerate',
1896 r' LabelString "Translation"',
1897 r' MenuString "Glosse Translation|s"',
1898 r' Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    """Move old ling glosses to the local layout."""
    has_gloss = find_token(document.body, '\\begin_inset Flex Glosse', 0) != -1
    has_trigloss = find_token(document.body, '\\begin_inset Flex Tri-Glosse', 0) != -1
    if has_gloss:
        document.append_local_layout(gloss_inset_def)
    if has_trigloss:
        document.append_local_layout(glosss_inset_def)
def _extract_gloss_arg(document, argname, i, j, warnname):
    """Extract and delete one "Argument <argname>" inset inside [i, j].

    Returns the argument's content lines ([] if the inset is absent),
    or None if the inset is malformed (caller should skip this gloss).
    """
    arg = find_token(document.body, "\\begin_inset Argument %s" % argname, i, j)
    if arg == -1:
        return []
    endarg = find_end_of_inset(document.body, arg)
    argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
    if argbeginPlain == -1:
        document.warning("Malformed LyX document: Can't find %s plain Layout" % warnname)
        return None
    argendPlain = find_end_of_inset(document.body, argbeginPlain)
    content = document.body[argbeginPlain + 1 : argendPlain - 2]

    # remove Arg insets and paragraph, if it only contains this inset
    if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
        del document.body[arg - 1 : endarg + 4]
    else:
        del document.body[arg : endarg + 1]
    return content


def revert_linggloss(document):
    """Revert Interlinear Gloss insets to old covington ERT commands.

    NOTE(review): control-flow lines of this function were lost in the source
    listing; the loop headers, guards and the \\gloss/\\trigloss selection
    have been restored, and the four near-identical argument-extraction
    passages are factored into _extract_gloss_arg — confirm against VCS.
    """
    if not "linguistics" in document.get_module_list():
        return
    document.del_local_layout(gloss_inset_def)
    document.del_local_layout(glosss_inset_def)

    cov_req = False
    glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
    for glosse in glosses:
        i = 0
        while True:
            i = find_token(document.body, glosse, i+1)
            if i == -1:
                break
            j = find_end_of_inset(document.body, i)
            if j == -1:
                document.warning("Malformed LyX document: Can't find end of Gloss inset")
                continue

            optargcontent = _extract_gloss_arg(document, "1", i, j, "optarg")
            if optargcontent is None:
                continue
            marg1content = _extract_gloss_arg(document, "post:1", i, j, "arg 1")
            if marg1content is None:
                continue
            marg2content = _extract_gloss_arg(document, "post:2", i, j, "arg 2")
            if marg2content is None:
                continue
            marg3content = _extract_gloss_arg(document, "post:3", i, j, "arg 3")
            if marg3content is None:
                continue

            cmd = "\\gloss"
            if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
                cmd = "\\trigloss"

            beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
            endInset = find_end_of_inset(document.body, i)
            endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
            precontent = put_cmd_in_ert(cmd)
            if len(optargcontent) > 0:
                precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
            precontent += put_cmd_in_ert("{")

            postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
            if cmd == "\\trigloss":
                postcontent += put_cmd_in_ert("}{") + marg3content
            postcontent += put_cmd_in_ert("}")

            document.body[endPlain:endInset + 1] = postcontent
            document.body[beginPlain + 1:beginPlain] = precontent
            del document.body[i : beginPlain + 1]
            if not cov_req:
                document.append_local_layout("Requires covington")
                cov_req = True
def revert_subexarg(document):
" Revert linguistic subexamples with argument to ERT "
# NOTE(review): this extract is mangled -- indentation was stripped and
# several control-flow lines (scan-loop header, `if ... == -1` guards,
# `return`/`continue`/`break` statements) are missing, so the block does
# not parse as-is.  Comments annotate the surviving lines only; compare
# with upstream lyx2lyx lyx_2_4.py before relying on them.
# Only documents loading the "linguistics" module carry Subexample layouts.
if not "linguistics" in document.get_module_list():
# Locate the next Subexample layout (presumably inside a while-loop whose
# header is missing here -- TODO confirm).
i = find_token(document.body, "\\begin_layout Subexample", i+1)
j = find_end_of_layout(document.body, i)
document.warning("Malformed LyX document: Can't find end of Subexample layout")
# check for consecutive layouts
k = find_token(document.body, "\\begin_layout", j)
if k == -1 or document.body[k] != "\\begin_layout Subexample":
j = find_end_of_layout(document.body, k)
document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Extract the optional argument (Argument 1) content as LaTeX.
arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
endarg = find_end_of_inset(document.body, arg)
argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
if argbeginPlain == -1:
document.warning("Malformed LyX document: Can't find optarg plain Layout")
# Slice excludes the trailing \end_layout/\end_inset pair of the arg inset.
argendPlain = find_end_of_inset(document.body, argbeginPlain)
optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
# remove Arg insets and paragraph, if it only contains this inset
if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
del document.body[arg - 1 : endarg + 4]
del document.body[arg : endarg + 1]
# Build the replacement ERT: \begin{subexamples}[<optarg>]
cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
# re-find end of layout
j = find_end_of_layout(document.body, i)
document.warning("Malformed LyX document: Can't find end of Subexample layout")
# check for consecutive layouts
k = find_token(document.body, "\\begin_layout", j)
if k == -1 or document.body[k] != "\\begin_layout Subexample":
# Each following Subexample paragraph becomes Standard + ERT "\item ".
document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
j = find_end_of_layout(document.body, k)
document.warning("Malformed LyX document: Can't find end of Subexample layout")
# Close the environment after the last consecutive Subexample paragraph.
endev = put_cmd_in_ert("\\end{subexamples}")
document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
+ ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
# The covington package provides the subexamples environment.
document.append_local_layout("Requires covington")
def revert_drs(document):
" Revert DRS insets (linguistics) to ERT "
# NOTE(review): mangled extract -- indentation stripped and many lines
# (outer `for drs in drses:` / `while True:` loops, `if ... == -1:` guards,
# `continue`/`break`, the `cmd = ...` assignments in the if/elif chain)
# are missing, so the block does not parse as-is.  Comments annotate the
# surviving lines only; verify against upstream lyx2lyx lyx_2_4.py.
if not "linguistics" in document.get_module_list():
# All Flex inset variants that represent a Discourse Representation Structure.
drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
"\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
"\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
"\\begin_inset Flex SDRS"]
# Scan for the next DRS inset of the current kind (loop header missing).
i = find_token(document.body, drs, i+1)
j = find_end_of_inset(document.body, i)
document.warning("Malformed LyX document: Can't find end of DRS inset")
# Check for arguments
# --- Argument 1 (pre-argument) ---------------------------------------
arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
endarg = find_end_of_inset(document.body, arg)
argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
if argbeginPlain == -1:
document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
argendPlain = find_end_of_inset(document.body, argbeginPlain)
prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
# remove Arg insets and paragraph, if it only contains this inset
if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
del document.body[arg - 1 : endarg + 4]
del document.body[arg : endarg + 1]
# Deletions shifted indices; re-find the inset end before the next arg.
j = find_end_of_inset(document.body, i)
document.warning("Malformed LyX document: Can't find end of DRS inset")
# --- Argument 2 (second pre-argument, used by SDRS) ------------------
arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
endarg = find_end_of_inset(document.body, arg)
argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
if argbeginPlain == -1:
document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
argendPlain = find_end_of_inset(document.body, argbeginPlain)
prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
# remove Arg insets and paragraph, if it only contains this inset
if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
del document.body[arg - 1 : endarg + 4]
del document.body[arg : endarg + 1]
j = find_end_of_inset(document.body, i)
document.warning("Malformed LyX document: Can't find end of DRS inset")
# --- Argument post:1 -------------------------------------------------
arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
endarg = find_end_of_inset(document.body, arg)
postarg1content = []
argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
if argbeginPlain == -1:
document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
argendPlain = find_end_of_inset(document.body, argbeginPlain)
postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
# remove Arg insets and paragraph, if it only contains this inset
if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
del document.body[arg - 1 : endarg + 4]
del document.body[arg : endarg + 1]
j = find_end_of_inset(document.body, i)
document.warning("Malformed LyX document: Can't find end of DRS inset")
# --- Argument post:2 -------------------------------------------------
arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
endarg = find_end_of_inset(document.body, arg)
postarg2content = []
argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
if argbeginPlain == -1:
document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
argendPlain = find_end_of_inset(document.body, argbeginPlain)
postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
# remove Arg insets and paragraph, if it only contains this inset
if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
del document.body[arg - 1 : endarg + 4]
del document.body[arg : endarg + 1]
j = find_end_of_inset(document.body, i)
document.warning("Malformed LyX document: Can't find end of DRS inset")
# --- Argument post:3 -------------------------------------------------
arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
endarg = find_end_of_inset(document.body, arg)
postarg3content = []
argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
if argbeginPlain == -1:
document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
argendPlain = find_end_of_inset(document.body, argbeginPlain)
postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
# remove Arg insets and paragraph, if it only contains this inset
if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
del document.body[arg - 1 : endarg + 4]
del document.body[arg : endarg + 1]
j = find_end_of_inset(document.body, i)
document.warning("Malformed LyX document: Can't find end of DRS inset")
# --- Argument post:4 -------------------------------------------------
arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
endarg = find_end_of_inset(document.body, arg)
postarg4content = []
argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
if argbeginPlain == -1:
document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
argendPlain = find_end_of_inset(document.body, argbeginPlain)
postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
# remove Arg insets and paragraph, if it only contains this inset
if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
del document.body[arg - 1 : endarg + 4]
del document.body[arg : endarg + 1]
# The respective LaTeX command
# NOTE(review): the `cmd = "\\..."` assignment lines for each branch are
# missing from this extract (only the conditions survive) -- the later
# code compares cmd against "\\qdrs", "\\condrs", "\\ifdrs".
if drs == "\\begin_inset Flex DRS*":
elif drs == "\\begin_inset Flex IfThen-DRS":
elif drs == "\\begin_inset Flex Cond-DRS":
elif drs == "\\begin_inset Flex QDRS":
elif drs == "\\begin_inset Flex NegDRS":
elif drs == "\\begin_inset Flex SDRS":
# Rebuild the inset as ERT: \cmd{prearg1}[{prearg2}]{body}{postargs...}
beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
endInset = find_end_of_inset(document.body, i)
endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
precontent = put_cmd_in_ert(cmd)
precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
if drs == "\\begin_inset Flex SDRS":
# SDRS takes a second pre-argument.
precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
precontent += put_cmd_in_ert("{")
if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
if cmd == "\\condrs" or cmd == "\\qdrs":
postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
postcontent = put_cmd_in_ert("}")
# Splice: replace inset framing with ERT pre/post material.
document.body[endPlain:endInset + 1] = postcontent
document.body[beginPlain + 1:beginPlain] = precontent
del document.body[i : beginPlain + 1]
# drs.sty bundles covington; record that and load both packages.
document.append_local_layout("Provides covington 1")
add_to_preamble(document, ["\\usepackage{drs,covington}"])
def revert_babelfont(document):
" Reverts the use of \\babelfont to user preamble "
# NOTE(review): mangled extract -- indentation stripped and lines missing
# (`if i == -1:` guards, `return`s, `try:`/`except:` around the float()
# calls, the scale-value checks).  Comments annotate surviving lines only;
# verify against upstream lyx2lyx lyx_2_4.py.
i = find_token(document.header, '\\use_non_tex_fonts', 0)
document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
# \babelfont is only relevant with non-TeX (system) fonts.
if not str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
i = find_token(document.header, '\\language_package', 0)
document.warning("Malformed LyX document: Missing \\language_package.")
# ... and only with the babel language package.
if get_value(document.header, "\\language_package", 0) != "babel":
# check font settings
roman = sans = typew = "default"
sf_scale = tt_scale = 100.0
# Read and reset \font_roman to "default", remembering the real font.
j = find_token(document.header, "\\font_roman", 0)
document.warning("Malformed LyX document: Missing \\font_roman.")
# We need to use this regex since split() does not handle quote protection
romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
roman = romanfont[2].strip('"')
romanfont[2] = '"default"'
document.header[j] = " ".join(romanfont)
# Same for \font_sans ...
j = find_token(document.header, "\\font_sans", 0)
document.warning("Malformed LyX document: Missing \\font_sans.")
# We need to use this regex since split() does not handle quote protection
sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
sans = sansfont[2].strip('"')
sansfont[2] = '"default"'
document.header[j] = " ".join(sansfont)
# ... and \font_typewriter.
j = find_token(document.header, "\\font_typewriter", 0)
document.warning("Malformed LyX document: Missing \\font_typewriter.")
# We need to use this regex since split() does not handle quote protection
ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
typew = ttfont[2].strip('"')
ttfont[2] = '"default"'
document.header[j] = " ".join(ttfont)
# Old-style-figures flag.
i = find_token(document.header, "\\font_osf", 0)
document.warning("Malformed LyX document: Missing \\font_osf.")
osf = str2bool(get_value(document.header, "\\font_osf", i))
# Sans scaling (percent); conversion to float is presumably guarded by a
# try/except in the missing lines -- TODO confirm.
j = find_token(document.header, "\\font_sf_scale", 0)
document.warning("Malformed LyX document: Missing \\font_sf_scale.")
sfscale = document.header[j].split()
document.header[j] = " ".join(sfscale)
sf_scale = float(val)
document.warning("Invalid font_sf_scale value: " + val)
# Typewriter scaling (percent), same pattern.
j = find_token(document.header, "\\font_tt_scale", 0)
document.warning("Malformed LyX document: Missing \\font_tt_scale.")
ttscale = document.header[j].split()
document.header[j] = " ".join(ttscale)
tt_scale = float(val)
document.warning("Invalid font_tt_scale value: " + val)
# set preamble stuff
pretext = ['%% This document must be processed with xelatex or lualatex!']
pretext.append('\\AtBeginDocument{%')
if roman != "default":
pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
if sans != "default":
sf = '\\babelfont{sf}['
if sf_scale != 100.0:
# \babelfont Scale= expects a factor, not a percentage.
sf += 'Scale=' + str(sf_scale / 100.0) + ','
sf += 'Mapping=tex-text]{' + sans + '}'
if typew != "default":
tw = '\\babelfont{tt}'
if tt_scale != 100.0:
tw += '[Scale=' + str(tt_scale / 100.0) + ']'
tw += '{' + typew + '}'
pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
insert_to_preamble(document, pretext)
def revert_minionpro(document):
" Revert native MinionPro font definition (with extra options) to LaTeX "
# NOTE(review): mangled extract -- indentation stripped; `if ... == -1:`
# guards, `return`s and the else-branch appending an `lf`/opts option to
# the preamble string appear to be missing.  Comments annotate surviving
# lines only; verify against upstream lyx2lyx lyx_2_4.py.
i = find_token(document.header, '\\use_non_tex_fonts', 0)
document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
# Nothing to revert when non-TeX (system) fonts are in use.
if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Extra options are stored in a \font_roman_opts header line.
regexp = re.compile(r'(\\font_roman_opts)')
x = find_re(document.header, regexp, 0)
# We need to use this regex since split() does not handle quote protection
romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
opts = romanopts[1].strip('"')
i = find_token(document.header, "\\font_roman", 0)
document.warning("Malformed LyX document: Missing \\font_roman.")
# We need to use this regex since split() does not handle quote protection
romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
roman = romanfont[1].strip('"')
# Only the native "minionpro" roman font is handled here.
if roman != "minionpro":
romanfont[1] = '"default"'
document.header[i] = " ".join(romanfont)
j = find_token(document.header, "\\font_osf true", 0)
preamble = "\\usepackage["
document.header[j] = "\\font_osf false"
# NOTE(review): the missing lines presumably append `opts` (and `lf,`
# when osf is off) to `preamble` here -- TODO confirm.
preamble += "]{MinionPro}"
add_to_preamble(document, [preamble])
# Remove the now-reverted \font_roman_opts line.
del document.header[x]
def revert_font_opts(document):
" revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
# NOTE(review): mangled extract -- indentation stripped; guard lines
# (`if i != -1:`), `return`s and the branches choosing between the
# \babelfont and \setxxxfont preamble strings (plus the lines appending
# `opts` and the font name) are missing.  Comments annotate surviving
# lines only; verify against upstream lyx2lyx lyx_2_4.py.
i = find_token(document.header, '\\use_non_tex_fonts', 0)
document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
i = find_token(document.header, '\\language_package', 0)
document.warning("Malformed LyX document: Missing \\language_package.")
# With babel, fonts are set via \babelfont; otherwise via \setxxxfont.
Babel = (get_value(document.header, "\\language_package", 0) == "babel")
# --- roman -----------------------------------------------------------
regexp = re.compile(r'(\\font_roman_opts)')
i = find_re(document.header, regexp, 0)
# We need to use this regex since split() does not handle quote protection
romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
opts = romanopts[1].strip('"')
del document.header[i]
regexp = re.compile(r'(\\font_roman)')
i = find_re(document.header, regexp, 0)
# We need to use this regex since split() does not handle quote protection
romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
font = romanfont[2].strip('"')
romanfont[2] = '"default"'
document.header[i] = " ".join(romanfont)
if font != "default":
# Babel vs. plain fontspec command (selection logic lines missing).
preamble = "\\babelfont{rm}["
preamble = "\\setmainfont["
preamble += "Mapping=tex-text]{"
add_to_preamble(document, [preamble])
# --- sans ------------------------------------------------------------
regexp = re.compile(r'(\\font_sans_opts)')
i = find_re(document.header, regexp, 0)
# We need to use this regex since split() does not handle quote protection
sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
opts = sfopts[1].strip('"')
del document.header[i]
regexp = re.compile(r'(\\font_sf_scale)')
i = find_re(document.header, regexp, 0)
# Second field of "\font_sf_scale <tex> <nontex>" -- the non-TeX value.
scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
regexp = re.compile(r'(\\font_sans)')
i = find_re(document.header, regexp, 0)
# We need to use this regex since split() does not handle quote protection
sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
font = sffont[2].strip('"')
sffont[2] = '"default"'
document.header[i] = " ".join(sffont)
if font != "default":
preamble = "\\babelfont{sf}["
preamble = "\\setsansfont["
# scaleval is a percentage like "85"; "Scale=0." + "85" -> Scale=0.85.
preamble += "Scale=0."
preamble += scaleval
preamble += "Mapping=tex-text]{"
add_to_preamble(document, [preamble])
# --- typewriter ------------------------------------------------------
regexp = re.compile(r'(\\font_typewriter_opts)')
i = find_re(document.header, regexp, 0)
# We need to use this regex since split() does not handle quote protection
ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
opts = ttopts[1].strip('"')
del document.header[i]
regexp = re.compile(r'(\\font_tt_scale)')
i = find_re(document.header, regexp, 0)
scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
regexp = re.compile(r'(\\font_typewriter)')
i = find_re(document.header, regexp, 0)
# We need to use this regex since split() does not handle quote protection
ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
font = ttfont[2].strip('"')
ttfont[2] = '"default"'
document.header[i] = " ".join(ttfont)
if font != "default":
preamble = "\\babelfont{tt}["
preamble = "\\setmonofont["
preamble += "Scale=0."
preamble += scaleval
preamble += "Mapping=tex-text]{"
add_to_preamble(document, [preamble])
def revert_plainNotoFonts_xopts(document):
" Revert native (straight) Noto font definition (with extra options) to LaTeX "
# NOTE(review): mangled extract -- indentation stripped; the assignment of
# `osf`, several `if ... == -1: return` guards, and the lines appending
# `osf,`/`opts` to `preamble` are missing.  Comments annotate surviving
# lines only; verify against upstream lyx2lyx lyx_2_4.py.
i = find_token(document.header, '\\use_non_tex_fonts', 0)
document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
# Only TeX fonts can map to the LaTeX noto package.
if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
y = find_token(document.header, "\\font_osf true", 0)
regexp = re.compile(r'(\\font_roman_opts)')
x = find_re(document.header, regexp, 0)
# Nothing to do when there are neither extra options nor osf.
if x == -1 and not osf:
# We need to use this regex since split() does not handle quote protection
romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
opts = romanopts[1].strip('"')
i = find_token(document.header, "\\font_roman", 0)
# We need to use this regex since split() does not handle quote protection
romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
roman = romanfont[1].strip('"')
# "Plain" Noto means all three families are the straight Noto variants.
if roman != "NotoSerif-TLF":
j = find_token(document.header, "\\font_sans", 0)
# We need to use this regex since split() does not handle quote protection
sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
sf = sffont[1].strip('"')
j = find_token(document.header, "\\font_typewriter", 0)
# We need to use this regex since split() does not handle quote protection
ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
tt = ttfont[1].strip('"')
# So we have noto as "complete font"
romanfont[1] = '"default"'
document.header[i] = " ".join(romanfont)
preamble = "\\usepackage["
preamble += "]{noto}"
add_to_preamble(document, [preamble])
document.header[y] = "\\font_osf false"
del document.header[x]
def revert_notoFonts_xopts(document):
    """Revert native (extended) Noto font definition (with extra options) to LaTeX.

    Only applies when TeX fonts are in use; the reverted package loads are
    collected in a font map and emitted into the user preamble.
    """
    i = find_token(document.header, '\\use_non_tex_fonts', 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
        return
    if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
        # Non-TeX (system) fonts: nothing to revert.
        return

    # Collect package loads per font and write them to the preamble.
    fontmap = dict()
    fm = createFontMapping(['Noto'])
    if revert_fonts(document, fm, fontmap, True):
        add_preamble_fonts(document, fontmap)
def revert_IBMFonts_xopts(document):
    """Revert native IBM font definition (with extra options) to LaTeX.

    Only applies when TeX fonts are in use; the reverted package loads are
    collected in a font map and emitted into the user preamble.
    """
    i = find_token(document.header, '\\use_non_tex_fonts', 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
        return
    if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
        # Non-TeX (system) fonts: nothing to revert.
        return

    fontmap = dict()
    fm = createFontMapping(['IBM'])
    if revert_fonts(document, fm, fontmap, True):
        add_preamble_fonts(document, fontmap)
def revert_AdobeFonts_xopts(document):
    """Revert native Adobe font definition (with extra options) to LaTeX.

    Only applies when TeX fonts are in use; the reverted package loads are
    collected in a font map and emitted into the user preamble.
    """
    i = find_token(document.header, '\\use_non_tex_fonts', 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
        return
    if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
        # Non-TeX (system) fonts: nothing to revert.
        return

    fontmap = dict()
    fm = createFontMapping(['Adobe'])
    if revert_fonts(document, fm, fontmap, True):
        add_preamble_fonts(document, fontmap)
def convert_osf(document):
" Convert \\font_osf param to new format "
# NOTE(review): mangled extract -- indentation stripped; branch lines
# (`if NonTeXFonts:`, `if sf in osfsf:`/`else:`, `return`s, guards) are
# missing.  Comments annotate surviving lines only; verify against
# upstream lyx2lyx lyx_2_4.py.
i = find_token(document.header, '\\use_non_tex_fonts', 0)
document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
i = find_token(document.header, '\\font_osf', 0)
document.warning("Malformed LyX document: Missing \\font_osf.")
# TeX fonts whose sans/typewriter variants carry their own osf switch.
osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]
osfval = str2bool(get_value(document.header, "\\font_osf", i))
# The old single \font_osf becomes the roman-specific switch.
document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
# Presumably the non-TeX-fonts branch: sans/tt osf default to false.
document.header.insert(i, "\\font_sans_osf false")
document.header.insert(i + 1, "\\font_typewriter_osf false")
x = find_token(document.header, "\\font_sans", 0)
document.warning("Malformed LyX document: Missing \\font_sans.")
# We need to use this regex since split() does not handle quote protection
sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
sf = sffont[1].strip('"')
# One of these two inserts per branch (membership test in osfsf missing).
document.header.insert(i, "\\font_sans_osf true")
document.header.insert(i, "\\font_sans_osf false")
x = find_token(document.header, "\\font_typewriter", 0)
document.warning("Malformed LyX document: Missing \\font_typewriter.")
# We need to use this regex since split() does not handle quote protection
ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
tt = ttfont[1].strip('"')
# Likewise for typewriter (membership test in osftt missing).
document.header.insert(i + 1, "\\font_typewriter_osf true")
document.header.insert(i + 1, "\\font_typewriter_osf false")
document.header.insert(i, "\\font_sans_osf false")
document.header.insert(i + 1, "\\font_typewriter_osf false")
def revert_osf(document):
" Revert \\font_*_osf params "
# NOTE(review): mangled extract -- indentation stripped; `if i == -1:`
# guards, `return`s and the condition deciding whether the final
# "\font_osf true" is written are missing.  Comments annotate surviving
# lines only; verify against upstream lyx2lyx lyx_2_4.py.
i = find_token(document.header, '\\use_non_tex_fonts', 0)
document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
# Roman osf switch becomes the single legacy \font_osf param.
i = find_token(document.header, '\\font_roman_osf', 0)
document.warning("Malformed LyX document: Missing \\font_roman_osf.")
osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
# Sans and typewriter osf switches are dropped; their values are folded
# into osfval (the `|=` for sans is presumably in a missing branch).
i = find_token(document.header, '\\font_sans_osf', 0)
document.warning("Malformed LyX document: Missing \\font_sans_osf.")
osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
del document.header[i]
i = find_token(document.header, '\\font_typewriter_osf', 0)
document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
del document.header[i]
# If any family requested osf, switch the legacy param on.
i = find_token(document.header, '\\font_osf', 0)
document.warning("Malformed LyX document: Missing \\font_osf.")
document.header[i] = "\\font_osf true"
def revert_texfontopts(document):
" Revert native TeX font definitions (with extra options) to LaTeX "
# NOTE(review): mangled extract -- indentation stripped; `return`s,
# `if ... == -1:` guards, `try/except` around float(), and the bodies of
# several `if`/`elif` branches (`package = ...`, `osf = ...`, `sc = ...`
# assignments) are missing.  Comments annotate surviving lines only;
# verify against upstream lyx2lyx lyx_2_4.py.
i = find_token(document.header, '\\use_non_tex_fonts', 0)
document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
# Only TeX fonts can be reverted to LaTeX package loads.
if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Roman TeX fonts handled by this reverter.
rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]
# First the sf (biolinum only)
regexp = re.compile(r'(\\font_sans_opts)')
x = find_re(document.header, regexp, 0)
# We need to use this regex since split() does not handle quote protection
sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
opts = sfopts[1].strip('"')
i = find_token(document.header, "\\font_sans", 0)
document.warning("Malformed LyX document: Missing \\font_sans.")
# We need to use this regex since split() does not handle quote protection
sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
sans = sffont[1].strip('"')
if sans == "biolinum":
sffont[1] = '"default"'
document.header[i] = " ".join(sffont)
j = find_token(document.header, "\\font_sans_osf true", 0)
k = find_token(document.header, "\\font_sf_scale", 0)
document.warning("Malformed LyX document: Missing \\font_sf_scale.")
sfscale = document.header[k].split()
document.header[k] = " ".join(sfscale)
sf_scale = float(val)
document.warning("Invalid font_sf_scale value: " + val)
preamble = "\\usepackage["
# osf requested: reset the header flag (option string line missing).
document.header[j] = "\\font_sans_osf false"
if sf_scale != 100.0:
# biolinum's scaled= option takes a factor, not a percentage.
preamble += 'scaled=' + str(sf_scale / 100.0) + ','
preamble += "]{biolinum}"
add_to_preamble(document, [preamble])
del document.header[x]
# Then the roman fonts with extra options.
regexp = re.compile(r'(\\font_roman_opts)')
x = find_re(document.header, regexp, 0)
# We need to use this regex since split() does not handle quote protection
romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
opts = romanopts[1].strip('"')
i = find_token(document.header, "\\font_roman", 0)
document.warning("Malformed LyX document: Missing \\font_roman.")
# We need to use this regex since split() does not handle quote protection
romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
roman = romanfont[1].strip('"')
if not roman in rmfonts:
romanfont[1] = '"default"'
document.header[i] = " ".join(romanfont)
# Map LyX font name to the LaTeX package (default `package = roman`
# and some branch bodies are missing from this extract).
if roman == "utopia":
elif roman == "palatino":
package = "mathpazo"
elif roman == "times":
package = "mathptmx"
elif roman == "xcharter":
package = "XCharter"
# Old-style-figures option differs per package (most bodies missing).
j = find_token(document.header, "\\font_roman_osf true", 0)
if roman == "cochineal":
osf = "proportional,osf,"
elif roman == "utopia":
elif roman == "garamondx":
elif roman == "libertine":
elif roman == "palatino":
elif roman == "xcharter":
document.header[j] = "\\font_roman_osf false"
# True small caps option (branch bodies missing).
k = find_token(document.header, "\\font_sc true", 0)
if roman == "utopia":
if roman == "palatino" and osf == "":
document.header[k] = "\\font_sc false"
preamble = "\\usepackage["
preamble += "]{" + package + "}"
add_to_preamble(document, [preamble])
del document.header[x]
def convert_CantarellFont(document):
    """Handle Cantarell font definition to LaTeX.

    Converts preamble-loaded Cantarell to the native font setting when the
    document uses TeX fonts ("oldstyle" selects the osf variant mapping).
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        fm = createFontMapping(['Cantarell'])
        convert_fonts(document, fm, "oldstyle")
def revert_CantarellFont(document):
    """Revert native Cantarell font definition to LaTeX.

    Collects the required package loads in a font map and writes them to
    the user preamble; only applies when TeX fonts are in use.
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        fontmap = dict()
        fm = createFontMapping(['Cantarell'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def convert_ChivoFont(document):
    """Handle Chivo font definition to LaTeX.

    Converts preamble-loaded Chivo to the native font setting when the
    document uses TeX fonts ("oldstyle" selects the osf variant mapping).
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        fm = createFontMapping(['Chivo'])
        convert_fonts(document, fm, "oldstyle")
def revert_ChivoFont(document):
    """Revert native Chivo font definition to LaTeX.

    Collects the required package loads in a font map and writes them to
    the user preamble; only applies when TeX fonts are in use.
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        fontmap = dict()
        fm = createFontMapping(['Chivo'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def convert_FiraFont(document):
    """Handle Fira font definition to LaTeX.

    Converts preamble-loaded Fira to the native font setting when the
    document uses TeX fonts ("lf" selects the lining-figures variant mapping).
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        fm = createFontMapping(['Fira'])
        convert_fonts(document, fm, "lf")
def revert_FiraFont(document):
    """Revert native Fira font definition to LaTeX.

    Collects the required package loads in a font map and writes them to
    the user preamble; only applies when TeX fonts are in use.
    """
    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        fontmap = dict()
        fm = createFontMapping(['Fira'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
def _convert_one_semibold(document, font_token, sb_family, family,
                          opts_token, scale_token, non_tex_fonts):
    """Replace one "...Semibold" font family by its plain family.

    With TeX fonts the semibold variant is preserved by adding (or
    prepending to) the matching *_opts header tag a "semibold" option.
    Emits a warning and returns if the expected header tags are absent.
    """
    i = find_token(document.header, font_token, 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing " + font_token + ".")
        return
    # We need to use this regex since split() does not handle quote protection
    font = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
    if font[1].strip('"') != sb_family:
        return
    font[1] = family
    document.header[i] = " ".join(font)

    if non_tex_fonts:
        # System fonts select the semibold face directly; no option needed.
        return
    regexp = re.compile(re.escape(opts_token))
    x = find_re(document.header, regexp, 0)
    if x == -1:
        # No opts tag yet: insert one before the scale tag,
        # which is a sensible place for it.
        fo = find_token(document.header, scale_token)
        if fo == -1:
            document.warning("Malformed LyX document! Missing " + scale_token)
        else:
            document.header.insert(fo, opts_token + " \"semibold\"")
    else:
        # Prepend "semibold" to the already existing options.
        # We need to use this regex since split() does not handle quote protection
        opts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
        document.header[x] = opts_token + " \"semibold, " + opts[1].strip('"') + "\""


def convert_Semibolds(document):
    " Move semibold options to extraopts "

    NonTeXFonts = False
    i = find_token(document.header, '\\use_non_tex_fonts', 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
    else:
        NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))

    # Roman and sans use \font_sf_scale as insertion anchor, the
    # typewriter options go before \font_tt_scale.
    # NOTE: the old code wrote the *sans* options (sfopts) into
    # \font_typewriter_opts (copy/paste bug); handling every family
    # through the same helper fixes that.
    _convert_one_semibold(document, "\\font_roman", "IBMPlexSerifSemibold",
                          '"IBMPlexSerif"', "\\font_roman_opts",
                          "\\font_sf_scale", NonTeXFonts)
    _convert_one_semibold(document, "\\font_sans", "IBMPlexSansSemibold",
                          '"IBMPlexSans"', "\\font_sans_opts",
                          "\\font_sf_scale", NonTeXFonts)
    _convert_one_semibold(document, "\\font_typewriter", "IBMPlexMonoSemibold",
                          '"IBMPlexMono"', "\\font_typewriter_opts",
                          "\\font_tt_scale", NonTeXFonts)
def convert_NotoRegulars(document):
    " Merge diverse noto regular fonts "

    # Fold each older "-TLF" Noto variant into the matching "Regular"
    # family.  The replacement is kept quoted so the rewritten header
    # line stays syntactically valid.
    replacements = (
        ("\\font_roman", "NotoSerif-TLF", '"NotoSerifRegular"'),
        ("\\font_sans", "NotoSans-TLF", '"NotoSansRegular"'),
        ("\\font_typewriter", "NotoMono-TLF", '"NotoMonoRegular"'),
    )
    for token, old, new in replacements:
        i = find_token(document.header, token, 0)
        if i == -1:
            # Warn but keep converting the remaining families.
            document.warning("Malformed LyX document: Missing " + token + ".")
            continue
        # We need to use this regex since split() does not handle quote protection
        font = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        if font[1].strip('"') == old:
            font[1] = new
            document.header[i] = " ".join(font)
def convert_CrimsonProFont(document):
    " Handle CrimsonPro font definition to LaTeX "

    # Documents using system (non-TeX) fonts need no conversion.
    if find_token(document.header, "\\use_non_tex_fonts false", 0) == -1:
        return
    mapping = createFontMapping(['CrimsonPro'])
    convert_fonts(document, mapping, "lf")
def revert_CrimsonProFont(document):
    " Revert native CrimsonPro font definition to LaTeX "

    if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
        # fontmap collects LaTeX package -> option list for the preamble;
        # it must be initialized before revert_fonts fills it.
        # (It was previously referenced without being initialized.)
        fontmap = dict()
        fm = createFontMapping(['CrimsonPro'])
        if revert_fonts(document, fm, fontmap, False, True):
            add_preamble_fonts(document, fontmap)
# LyX versions whose file format this converter module produces/accepts.
supported_versions = ["2.4.0", "2.4"]
3111 [545, [convert_lst_literalparam]],
3116 [550, [convert_fontenc]],
3123 [557, [convert_vcsinfo]],
3124 [558, [removeFrontMatterStyles]],
3127 [561, [convert_latexFonts]], # Handle dejavu, ibmplex fonts in GUI
3131 [565, [convert_AdobeFonts]], # Handle adobe fonts in GUI
3132 [566, [convert_hebrew_parentheses]],
3138 [572, [convert_notoFonts]], # Added options thin, light, extralight for Noto
3139 [573, [convert_inputencoding_namechange]],
3140 [574, [convert_ruby_module, convert_utf8_japanese]],
3141 [575, [convert_lineno]],
3143 [577, [convert_linggloss]],
3147 [581, [convert_osf]],
3148 [582, [convert_AdobeFonts,convert_latexFonts,convert_notoFonts,convert_CantarellFont,convert_FiraFont]],# old font re-converterted due to extra options
3149 [583, [convert_ChivoFont,convert_Semibolds,convert_NotoRegulars,convert_CrimsonProFont]],
3152 revert = [[582, [revert_ChivoFont,revert_CrimsonProFont]],
3153 [581, [revert_CantarellFont,revert_FiraFont]],
3154 [580, [revert_texfontopts,revert_osf]],
3155 [579, [revert_minionpro, revert_plainNotoFonts_xopts, revert_notoFonts_xopts, revert_IBMFonts_xopts, revert_AdobeFonts_xopts, revert_font_opts]], # keep revert_font_opts last!
3156 [578, [revert_babelfont]],
3157 [577, [revert_drs]],
3158 [576, [revert_linggloss, revert_subexarg]],
3159 [575, [revert_new_languages]],
3160 [574, [revert_lineno]],
3161 [573, [revert_ruby_module, revert_utf8_japanese]],
3162 [572, [revert_inputencoding_namechange]],
3163 [571, [revert_notoFonts]],
3164 [570, [revert_cmidruletrimming]],
3165 [569, [revert_bibfileencodings]],
3166 [568, [revert_tablestyle]],
3167 [567, [revert_soul]],
3168 [566, [revert_malayalam]],
3169 [565, [revert_hebrew_parentheses]],
3170 [564, [revert_AdobeFonts]],
3171 [563, [revert_lformatinfo]],
3172 [562, [revert_listpargs]],
3173 [561, [revert_l7ninfo]],
3174 [560, [revert_latexFonts]], # Handle dejavu, ibmplex fonts in user preamble
3175 [559, [revert_timeinfo, revert_namenoextinfo]],
3176 [558, [revert_dateinfo]],
3177 [557, [addFrontMatterStyles]],
3178 [556, [revert_vcsinfo]],
3179 [555, [revert_bibencoding]],
3180 [554, [revert_vcolumns]],
3181 [553, [revert_stretchcolumn]],
3182 [552, [revert_tuftecite]],
3183 [551, [revert_floatpclass, revert_floatalignment]],
3184 [550, [revert_nospellcheck]],
3185 [549, [revert_fontenc]],
3186 [548, []],# dummy format change
3187 [547, [revert_lscape]],
3188 [546, [revert_xcharter]],
3189 [545, [revert_paratype]],
3190 [544, [revert_lst_literalparam]]
3194 if __name__ == "__main__":