1 # -*- coding: utf-8 -*-
2 # This file is part of lyx2lyx
3 # Copyright (C) 2018 The LyX team
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 """ Convert files to the file format generated by lyx 2.4"""
25 from datetime import (datetime, date, time)
27 # Uncomment only what you need to import, please.
29 from parser_tools import (count_pars_in_inset, del_token, find_end_of_inset,
30 find_end_of_layout, find_token, find_token_backwards, find_token_exact,
31 find_re, get_bool_value,
32 get_containing_layout, get_option_value, get_value, get_quoted_value)
33 # del_value, del_complete_lines,
34 # find_complete_lines, find_end_of,
35 # find_re, find_substring,
36 # get_containing_inset,
37 # is_in_inset, set_bool_value
38 # find_tokens, check_token
40 from lyx2lyx_tools import (put_cmd_in_ert, add_to_preamble, insert_to_preamble, lyx2latex,
41 revert_language, revert_flex_inset, str2bool)
42 # revert_font_attrs, latex_length
43 # get_ert, lyx2verbatim, length_in_bp, convert_info_insets
44 # revert_flex_inset, hex2ratio
46 ####################################################################
47 # Private helper functions
# Emit one \usepackage line per collected font package into the document's
# LaTeX user preamble. fontmap maps package name -> list of package options.
# NOTE(review): this listing is line-sampled; the loop header iterating over
# fontmap's keys (and the no-options branch setting xoption = "") is not
# visible here — confirm against the full source before relying on details.
49 def add_preamble_fonts(document, fontmap):
50 " Add collected font-packages with their option to user-preamble"
53 if len(fontmap[pkg]) > 0:
54 xoption = "[" + ",".join(fontmap[pkg]) + "]"
57 preamble = "\\usepackage%s{%s}" % (xoption, pkg)
58 add_to_preamble(document, [preamble])
# Build the canonical "pkg:opt1-opt2-..." string used as the key of
# fontmapping.pkg2fontmap (see fontinfo.pkgkey below).
61 def createkey(pkg, options):
63 return pkg + ':' + "-".join(options)
# Attribute initializations of the per-font record (class header and method
# def lines are missing from this sampled listing — presumably `class
# fontinfo:` with an __init__ and a key-computing method; TODO confirm).
67 self.fontname = None # key into font2pkgmap
68 self.fonttype = None # roman,sans,typewriter,math
69 self.scaletype = None # None,sf,tt
70 self.scaleopt = None # None, 'scaled', 'scale'
74 self.pkgkey = None # key into pkg2fontmap
75 self.osfopt = None # None, string
76 self.osfdef = "false" # "false" or "true"
# Recompute the lookup key from package + options (uses createkey above).
79 self.pkgkey = createkey(self.package, self.options)
# Bidirectional font <-> package lookup tables (enclosing class header and
# __init__ def line are not visible in this sampled listing — TODO confirm).
83 self.font2pkgmap = dict()
84 self.pkg2fontmap = dict()
85 self.pkginmap = dict() # defines, if a map for package exists
# Register a family of font names (with optional per-name options) for one
# LaTeX package in the font2pkgmap / pkg2fontmap / pkginmap tables.
87 def expandFontMapping(self, font_list, font_type, scale_type, pkg, scaleopt = None, osfopt = None, osfdef = "false"):
88 " Expand fontinfo mapping"
90 # fontlist: list of fontnames, each element
91 # may contain a ','-separated list of needed options
92 # like e.g. 'IBMPlexSansCondensed,condensed'
93 # font_type: one of 'roman', 'sans', 'typewriter', 'math'
94 # scale_type: one of None, 'sf', 'tt'
95 # pkg: package defining the font. Defaults to fontname if None
96 # scaleopt: one of None, 'scale', 'scaled', or some other string
97 # to be used in scale option (e.g. scaled=0.7)
98 # osfopt: None or some other string to be used in osf option
99 # osfdef: "true" if osf is default
# NOTE(review): sampled listing — the loop over font_list, the fontinfo
# construction (`fe = fontinfo()`) and the option-splitting lines are missing
# here; the statements below operate on that missing context.
102 fe.fonttype = font_type
103 fe.scaletype = scale_type
106 fe.fontname = font_name
108 fe.scaleopt = scaleopt
# If no package was given, the font name doubles as the package name.
112 fe.package = font_name
116 self.font2pkgmap[font_name] = fe
117 if fe.pkgkey in self.pkg2fontmap:
118 # Repeated the same entry? Check content
119 if self.pkg2fontmap[fe.pkgkey] != font_name:
# NOTE(review): `document` is not defined in this method's scope — this
# error path would raise NameError if ever taken; verify upstream.
120 document.error("Something is wrong in pkgname+options <-> fontname mapping")
121 self.pkg2fontmap[fe.pkgkey] = font_name
122 self.pkginmap[fe.package] = 1
# Reverse lookup: given a package and its option list, return the LyX font
# name registered for that exact combination (sampled listing — the early
# returns for the miss/hit cases are not visible here).
124 def getfontname(self, pkg, options):
126 pkgkey = createkey(pkg, options)
127 if not pkgkey in self.pkg2fontmap:
129 fontname = self.pkg2fontmap[pkgkey]
130 if not fontname in self.font2pkgmap:
# NOTE(review): `document` is not in scope here either — would NameError.
131 document.error("Something is wrong in pkgname+options <-> fontname mapping")
# Sanity check: the forward map must agree with the reverse map.
133 if pkgkey == self.font2pkgmap[fontname].pkgkey:
# Build and return a fontmapping populated for the requested font families
# (sampled listing — the `fm = fontmapping()` line, the first `if font ==`
# branch header, and the final `return fm` are not visible here).
137 def createFontMapping(fontlist):
138 # Create info for known fonts for the use in
139 # convert_latexFonts() and
140 # revert_latexFonts()
142 # * Would be more handy to parse latexFonts file,
143 # but the path to this file is unknown
144 # * For now, add DejaVu and IBMPlex only.
145 # * Expand, if desired
147 for font in fontlist:
149 fm.expandFontMapping(['DejaVuSerif', 'DejaVuSerifCondensed'], "roman", None, None)
150 fm.expandFontMapping(['DejaVuSans','DejaVuSansCondensed'], "sans", "sf", None, "scaled")
151 fm.expandFontMapping(['DejaVuSansMono'], "typewriter", "tt", None, "scaled")
153 fm.expandFontMapping(['IBMPlexSerif', 'IBMPlexSerifThin,thin',
154 'IBMPlexSerifExtraLight,extralight', 'IBMPlexSerifLight,light',
155 'IBMPlexSerifSemibold,semibold'],
156 "roman", None, "plex-serif")
157 fm.expandFontMapping(['IBMPlexSans','IBMPlexSansCondensed,condensed',
158 'IBMPlexSansThin,thin', 'IBMPlexSansExtraLight,extralight',
159 'IBMPlexSansLight,light', 'IBMPlexSansSemibold,semibold'],
160 "sans", "sf", "plex-sans", "scale")
161 fm.expandFontMapping(['IBMPlexMono', 'IBMPlexMonoThin,thin',
162 'IBMPlexMonoExtraLight,extralight', 'IBMPlexMonoLight,light',
163 'IBMPlexMonoSemibold,semibold'],
164 "typewriter", "tt", "plex-mono", "scale")
165 elif font == 'Adobe':
166 fm.expandFontMapping(['ADOBESourceSerifPro'], "roman", None, "sourceserifpro", None, "osf")
167 fm.expandFontMapping(['ADOBESourceSansPro'], "sans", "sf", "sourcesanspro", "scaled", "osf")
168 fm.expandFontMapping(['ADOBESourceCodePro'], "typewriter", "tt", "sourcecodepro", "scaled", "osf")
170 fm.expandFontMapping(['NotoSerifRegular,regular', 'NotoSerifMedium,medium',
171 'NotoSerifThin,thin', 'NotoSerifLight,light',
172 'NotoSerifExtralight,extralight'],
173 "roman", None, "noto-serif", None, "osf")
174 fm.expandFontMapping(['NotoSansRegular,regular', 'NotoSansMedium,medium',
175 'NotoSansThin,thin', 'NotoSansLight,light',
176 'NotoSansExtralight,extralight'],
177 "sans", "sf", "noto-sans", "scaled")
178 fm.expandFontMapping(['NotoMonoRegular,regular'], "typewriter", "tt", "noto-mono", "scaled")
179 elif font == 'Cantarell':
180 fm.expandFontMapping(['cantarell,defaultsans'],
181 "sans", "sf", "cantarell", "scaled", "oldstyle")
182 elif font == 'Chivo':
183 fm.expandFontMapping(['ChivoThin,thin', 'ChivoLight,light',
184 'Chivo,regular', 'ChivoMedium,medium'],
185 "sans", "sf", "Chivo", "scale", "oldstyle")
# FiraSans/FiraMono declare "lf" as the osf option with osf as default.
187 fm.expandFontMapping(['FiraSans', 'FiraSansBook,book',
188 'FiraSansThin,thin', 'FiraSansLight,light',
189 'FiraSansExtralight,extralight',
190 'FiraSansUltralight,ultralight'],
191 "sans", "sf", "FiraSans", "scaled", "lf", "true")
192 fm.expandFontMapping(['FiraMono'], "typewriter", "tt", "FiraMono", "scaled", "lf", "true")
# Scan the LaTeX user preamble for \usepackage lines of known font packages
# and convert them into native LyX header settings (\font_roman, scale, osf,
# options). NOTE(review): sampled listing — several control-flow lines
# (loop bodies, `continue`s, option bookkeeping) are missing between the
# visible statements; hedge all behavioral claims accordingly.
195 def convert_fonts(document, fm, osfoption = "osf"):
196 " Handle font definition (LaTeX preamble -> native) "
198 rpkg = re.compile(r'^\\usepackage(\[([^\]]*)\])?\{([^\}]+)\}')
199 rscaleopt = re.compile(r'^scaled?=(.*)')
201 # Check whether we go beyond font option feature introduction
202 haveFontOpts = document.end_format > 580
205 while i < len(document.preamble):
206 i = find_re(document.preamble, rpkg, i+1)
209 mo = rpkg.search(document.preamble[i])
210 if mo == None or mo.group(2) == None:
# Normalize the bracketed option string into a list.
213 options = mo.group(2).replace(' ', '').split(",")
218 while o < len(options):
219 if options[o] == osfoption:
223 mo = rscaleopt.search(options[o])
231 if not pkg in fm.pkginmap:
236 # Try with name-option combination first
237 # (only one default option supported currently)
239 while o < len(options):
241 fn = fm.getfontname(pkg, [opt])
248 fn = fm.getfontname(pkg, [])
250 fn = fm.getfontname(pkg, options)
253 del document.preamble[i]
254 fontinfo = fm.font2pkgmap[fn]
255 if fontinfo.scaletype == None:
258 fontscale = "\\font_" + fontinfo.scaletype + "_scale"
259 fontinfo.scaleval = oscale
260 if (has_osf and fontinfo.osfdef == "false") or (not has_osf and fontinfo.osfdef == "true"):
261 if fontinfo.osfopt == None:
# NOTE(review): extend() with a string adds it character by character
# ("osf" -> 'o','s','f'); append() looks intended — verify upstream.
262 options.extend(osfoption)
264 osf = find_token(document.header, "\\font_osf false")
265 osftag = "\\font_osf"
266 if osf == -1 and fontinfo.fonttype != "math":
267 # Try with newer format
268 osftag = "\\font_" + fontinfo.fonttype + "_osf"
269 osf = find_token(document.header, osftag + " false")
271 document.header[osf] = osftag + " true"
# Drop the marker comment lyx2lyx itself left above the package line.
272 if i > 0 and document.preamble[i-1] == "% Added by lyx2lyx":
273 del document.preamble[i-1]
275 if fontscale != None:
276 j = find_token(document.header, fontscale, 0)
278 val = get_value(document.header, fontscale, j)
282 scale = "%03d" % int(float(oscale) * 100)
283 document.header[j] = fontscale + " " + scale + " " + vals[1]
284 ft = "\\font_" + fontinfo.fonttype
285 j = find_token(document.header, ft, 0)
287 val = get_value(document.header, ft, j)
288 words = val.split() # ! splits also values like '"DejaVu Sans"'
289 words[0] = '"' + fn + '"'
290 document.header[j] = ft + ' ' + ' '.join(words)
291 if haveFontOpts and fontinfo.fonttype != "math":
292 fotag = "\\font_" + fontinfo.fonttype + "_opts"
293 fo = find_token(document.header, fotag)
295 document.header[fo] = fotag + " \"" + ",".join(options) + "\""
297 # Sensible place to insert tag
298 fo = find_token(document.header, "\\font_sf_scale")
300 document.warning("Malformed LyX document! Missing \\font_sf_scale")
302 document.header.insert(fo, fotag + " \"" + ",".join(options) + "\"")
# Inverse of convert_fonts: reset native \font_* header entries to "default"
# and record package + options in fontmap for add_preamble_fonts(). Returns
# (per the callers below) whether anything was reverted — the return lines
# themselves are missing from this sampled listing; TODO confirm.
306 def revert_fonts(document, fm, fontmap, OnlyWithXOpts = False, WithXOpts = False):
307 " Revert native font definition to LaTeX "
308 # fonlist := list of fonts created from the same package
309 # Empty package means that the font-name is the same as the package-name
310 # fontmap (key = package, val += found options) will be filled
311 # and used later in add_preamble_fonts() to be added to user-preamble
313 rfontscale = re.compile(r'^\s*(\\font_(roman|sans|typewriter|math))\s+')
314 rscales = re.compile(r'^\s*(\d+)\s+(\d+)')
316 while i < len(document.header):
317 i = find_re(document.header, rfontscale, i+1)
320 mo = rfontscale.search(document.header[i])
323 ft = mo.group(1) # 'roman', 'sans', 'typewriter', 'math'
324 val = get_value(document.header, ft, i)
325 words = val.split(' ') # ! splits also values like '"DejaVu Sans"'
326 font = words[0].strip('"') # TeX font name has no whitespace
327 if not font in fm.font2pkgmap:
329 fontinfo = fm.font2pkgmap[font]
330 val = fontinfo.package
331 if not val in fontmap:
# Optionally harvest the \font_*_opts line as extra package options.
334 if OnlyWithXOpts or WithXOpts:
335 if ft == "\\font_math":
# Roman is the default; the sans/typewriter cases override it below.
337 regexp = re.compile(r'^\s*(\\font_roman_opts)\s+')
338 if ft == "\\font_sans":
339 regexp = re.compile(r'^\s*(\\font_sans_opts)\s+')
340 elif ft == "\\font_typewriter":
341 regexp = re.compile(r'^\s*(\\font_typewriter_opts)\s+')
342 x = find_re(document.header, regexp, 0)
343 if x == -1 and OnlyWithXOpts:
347 # We need to use this regex since split() does not handle quote protection
348 xopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
349 opts = xopts[1].strip('"').split(",")
350 fontmap[val].extend(opts)
351 del document.header[x]
352 words[0] = '"default"'
353 document.header[i] = ft + ' ' + ' '.join(words)
354 if fontinfo.scaleopt != None:
355 xval = get_value(document.header, "\\font_" + fontinfo.scaletype + "_scale", 0)
356 mo = rscales.search(xval)
361 # set correct scale option
362 fontmap[val].extend([fontinfo.scaleopt + "=" + format(float(xval1) / 100, '.2f')])
363 if fontinfo.osfopt != None:
# With osf-by-default fonts the header flag meaning is inverted.
365 if fontinfo.osfdef == "true":
367 osf = find_token(document.header, "\\font_osf " + oldval)
368 if osf == -1 and ft != "\\font_math":
369 # Try with newer format
370 osftag = "\\font_roman_osf " + oldval
371 if ft == "\\font_sans":
372 osftag = "\\font_sans_osf " + oldval
373 elif ft == "\\font_typewriter":
374 osftag = "\\font_typewriter_osf " + oldval
375 osf = find_token(document.header, osftag)
377 fontmap[val].extend([fontinfo.osfopt])
378 if len(fontinfo.options) > 0:
379 fontmap[val].extend(fontinfo.options)
382 ###############################################################################
384 ### Conversion and reversion routines
386 ###############################################################################
# Rename old \inputencoding values to their 2.4 names:
# "auto" -> "auto-legacy", "default" -> "auto-legacy-plain".
388 def convert_inputencoding_namechange(document):
389 " Rename inputencoding settings. "
390 i = find_token(document.header, "\\inputencoding", 0)
391 s = document.header[i].replace("auto", "auto-legacy")
394 document.header[i] = s.replace("default", "auto-legacy-plain")
# Inverse of convert_inputencoding_namechange: restore the pre-2.4 names.
# The longer "auto-legacy-plain" must be replaced before "auto-legacy".
396 def revert_inputencoding_namechange(document):
397 " Rename inputencoding settings. "
398 i = find_token(document.header, "\\inputencoding", 0)
401 s = document.header[i].replace("auto-legacy-plain", "default")
402 document.header[i] = s.replace("auto-legacy", "auto")
# Convert preamble-based Noto font setup to native settings (TeX fonts only).
404 def convert_notoFonts(document):
405 " Handle Noto fonts definition to LaTeX "
407 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
408 fm = createFontMapping(['Noto'])
409 convert_fonts(document, fm)
# Revert native Noto settings back to preamble code (the fontmap dict
# initialization line is missing from this sampled listing).
411 def revert_notoFonts(document):
412 " Revert native Noto font definition to LaTeX "
414 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
416 fm = createFontMapping(['Noto'])
417 if revert_fonts(document, fm, fontmap):
418 add_preamble_fonts(document, fontmap)
# Convert preamble-based DejaVu/IBM Plex setups to native settings.
420 def convert_latexFonts(document):
421 " Handle DejaVu and IBMPlex fonts definition to LaTeX "
423 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
424 fm = createFontMapping(['DejaVu', 'IBM'])
425 convert_fonts(document, fm)
# Revert native DejaVu/IBM Plex settings back to preamble code (fontmap
# initialization line missing from this sampled listing).
427 def revert_latexFonts(document):
428 " Revert native DejaVu font definition to LaTeX "
430 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
432 fm = createFontMapping(['DejaVu', 'IBM'])
433 if revert_fonts(document, fm, fontmap):
434 add_preamble_fonts(document, fontmap)
# Convert preamble-based Adobe Source Pro setups to native settings.
436 def convert_AdobeFonts(document):
437 " Handle Adobe Source fonts definition to LaTeX "
439 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
440 fm = createFontMapping(['Adobe'])
441 convert_fonts(document, fm)
# Revert native Adobe Source Pro settings back to preamble code (fontmap
# initialization line missing from this sampled listing).
443 def revert_AdobeFonts(document):
444 " Revert Adobe Source font definition to LaTeX "
446 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
448 fm = createFontMapping(['Adobe'])
449 if revert_fonts(document, fm, fontmap):
450 add_preamble_fonts(document, fontmap)
# Delete every Begin/EndFrontmatter layout (plus trailing blank lines) from
# the body — these styles no longer exist in the target format.
452 def removeFrontMatterStyles(document):
453 " Remove styles Begin/EndFrontmatter"
455 layouts = ['BeginFrontmatter', 'EndFrontmatter']
456 tokenend = len('\\begin_layout ')
459 i = find_token_exact(document.body, '\\begin_layout ', i+1)
462 layout = document.body[i][tokenend:].strip()
463 if layout not in layouts:
465 j = find_end_of_layout(document.body, i)
467 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Also swallow the empty lines following the removed layout.
469 while document.body[j+1].strip() == '':
471 document.body[i:j+1] = []
# For elsarticle documents, wrap the known front-matter layouts (Title,
# Author, Abstract, ...) in new Begin/EndFrontmatter layouts. The inner
# helper insertFrontmatter splices the layout plus an explanatory Note inset
# at a blank-line boundary. Sampled listing: the lines tracking `first`
# (first matched layout) and some Note text lines are not visible here.
473 def addFrontMatterStyles(document):
474 " Use styles Begin/EndFrontmatter for elsarticle"
476 if document.textclass != "elsarticle":
479 def insertFrontmatter(prefix, line):
481 while above > 0 and document.body[above-1].strip() == '':
484 while document.body[below].strip() == '':
486 document.body[above:below] = ['', '\\begin_layout ' + prefix + 'Frontmatter',
487 '\\begin_inset Note Note',
489 '\\begin_layout Plain Layout',
492 '\\end_inset', '', '',
495 layouts = ['Title', 'Title footnote', 'Author', 'Author footnote',
496 'Corresponding author', 'Address', 'Email', 'Abstract', 'Keywords']
497 tokenend = len('\\begin_layout ')
501 i = find_token_exact(document.body, '\\begin_layout ', i+1)
504 layout = document.body[i][tokenend:].strip()
505 if layout not in layouts:
507 k = find_end_of_layout(document.body, i)
509 document.warning("Malformed LyX document: Can't find end of layout at line %d" % i)
# Insert End after the last matched layout first, so `first` stays valid.
516 insertFrontmatter('End', k+1)
517 insertFrontmatter('Begin', first)
# Add the new 'literal "true"' parameter to every include-command inset,
# inserting it just before the inset's first blank line.
520 def convert_lst_literalparam(document):
521 " Add param literal to include inset "
525 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
528 j = find_end_of_inset(document.body, i)
530 document.warning("Malformed LyX document: Can't find end of command inset at line %d" % i)
# Skip over the parameter lines to the first empty line inside the inset.
532 while i < j and document.body[i].strip() != '':
534 document.body.insert(i, 'literal "true"')
# Inverse of convert_lst_literalparam: strip the 'literal' parameter from
# every include-command inset.
537 def revert_lst_literalparam(document):
538 " Remove param literal from include inset "
542 i = find_token(document.body, '\\begin_inset CommandInset include', i+1)
545 j = find_end_of_inset(document.body, i)
547 document.warning("Malformed LyX document: Can't find end of include inset at line %d" % i)
549 del_token(document.body, 'literal', i, j)
# Revert native ParaType font settings (PTSerif/PTSans/PTMono) to preamble
# \usepackage code, carrying sf/tt scale values over as scaled= options.
# Sampled listing: several guard lines (early returns, value extraction into
# `val`, sfoption/ttoption defaults) are not visible here.
552 def revert_paratype(document):
553 " Revert ParaType font definitions to LaTeX "
555 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
557 i1 = find_token(document.header, "\\font_roman \"PTSerif-TLF\"", 0)
558 i2 = find_token(document.header, "\\font_sans \"default\"", 0)
559 i3 = find_token(document.header, "\\font_typewriter \"default\"", 0)
560 j = find_token(document.header, "\\font_sans \"PTSans-TLF\"", 0)
563 sfval = find_token(document.header, "\\font_sf_scale", 0)
565 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
567 sfscale = document.header[sfval].split()
570 document.header[sfval] = " ".join(sfscale)
573 sf_scale = float(val)
575 document.warning("Invalid font_sf_scale value: " + val)
# NOTE(review): comparing a float to the string "100.0" is always True in
# Python 3 — the scaled= option is therefore always emitted; verify intent.
578 if sf_scale != "100.0":
579 sfoption = "scaled=" + str(sf_scale / 100.0)
580 k = find_token(document.header, "\\font_typewriter \"PTMono-TLF\"", 0)
581 ttval = get_value(document.header, "\\font_tt_scale", 0)
586 ttoption = "scaled=" + format(float(ttval) / 100, '.2f')
# All three families set: the umbrella 'paratype' package covers them.
587 if i1 != -1 and i2 != -1 and i3!= -1:
588 add_to_preamble(document, ["\\usepackage{paratype}"])
591 add_to_preamble(document, ["\\usepackage{PTSerif}"])
592 document.header[i1] = document.header[i1].replace("PTSerif-TLF", "default")
595 add_to_preamble(document, ["\\usepackage[" + sfoption + "]{PTSans}"])
597 add_to_preamble(document, ["\\usepackage{PTSans}"])
598 document.header[j] = document.header[j].replace("PTSans-TLF", "default")
601 add_to_preamble(document, ["\\usepackage[" + ttoption + "]{PTMono}"])
603 add_to_preamble(document, ["\\usepackage{PTMono}"])
604 document.header[k] = document.header[k].replace("PTMono-TLF", "default")
# Revert the native XCharter roman font setting to preamble code, moving an
# active \font_osf flag into the package's [osf] option (the lines building
# `options` are missing from this sampled listing).
607 def revert_xcharter(document):
608 " Revert XCharter font definitions to LaTeX "
610 i = find_token(document.header, "\\font_roman \"xcharter\"", 0)
614 # replace unsupported font setting
615 document.header[i] = document.header[i].replace("xcharter", "default")
616 # no need for preamble code with system fonts
617 if get_bool_value(document.header, "\\use_non_tex_fonts"):
620 # transfer old style figures setting to package options
621 j = find_token(document.header, "\\font_osf true")
624 document.header[j] = "\\font_osf false"
628 add_to_preamble(document, ["\\usepackage%s{XCharter}"%options])
# Replace Flex Landscape insets (Landscape module) with raw
# \begin{landscape}...\end{landscape} ERT; the "(Floating)" variant is
# additionally wrapped in \afterpage{}. Replace the end first so the start
# indices stay valid.
631 def revert_lscape(document):
632 " Reverts the landscape environment (Landscape module) to TeX-code "
634 if not "landscape" in document.get_module_list():
639 i = find_token(document.body, "\\begin_inset Flex Landscape", i+1)
642 j = find_end_of_inset(document.body, i)
644 document.warning("Malformed LyX document: Can't find end of Landscape inset")
647 if document.body[i] == "\\begin_inset Flex Landscape (Floating)":
648 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}}")
649 document.body[i : i + 4] = put_cmd_in_ert("\\afterpage{\\begin{landscape}")
650 add_to_preamble(document, ["\\usepackage{afterpage}"])
652 document.body[j - 2 : j + 1] = put_cmd_in_ert("\\end{landscape}")
653 document.body[i : i + 4] = put_cmd_in_ert("\\begin{landscape}")
655 add_to_preamble(document, ["\\usepackage{pdflscape}"])
# Rename the default \fontencoding value "global" to its 2.4 name "auto".
658 def convert_fontenc(document):
659 " Convert default fontenc setting "
661 i = find_token(document.header, "\\fontencoding global", 0)
665 document.header[i] = document.header[i].replace("global", "auto")
# Inverse of convert_fontenc: rename "auto" back to "global".
668 def revert_fontenc(document):
669 " Revert default fontenc setting "
671 i = find_token(document.header, "\\fontencoding auto", 0)
675 document.header[i] = document.header[i].replace("auto", "global")
# Strip the \nospellcheck font-info parameter everywhere in the body (the
# deletion/loop lines are not visible in this sampled listing).
678 def revert_nospellcheck(document):
679 " Remove nospellcheck font info param "
683 i = find_token(document.body, '\\nospellcheck', i)
# Remove the new 'class'/'document' float placement values from the header
# and from every Float inset (the replacement/deletion lines are not visible
# in this sampled listing).
689 def revert_floatpclass(document):
690 " Remove float placement params 'document' and 'class' "
692 del_token(document.header, "\\float_placement class")
696 i = find_token(document.body, '\\begin_inset Float', i+1)
699 j = find_end_of_inset(document.body, i)
700 k = find_token(document.body, 'placement class', i, i + 2)
702 k = find_token(document.body, 'placement document', i, i + 2)
# Remove the per-float 'alignment' parameter (and the global
# \float_alignment header), emitting equivalent ERT (\centering etc.) at the
# top of each float's content instead. "document" resolves to the global
# value saved in galignment.
709 def revert_floatalignment(document):
710 " Remove float alignment params "
712 galignment = get_value(document.header, "\\float_alignment", delete=True)
716 i = find_token(document.body, '\\begin_inset Float', i+1)
719 j = find_end_of_inset(document.body, i)
721 document.warning("Malformed LyX document: Can't find end of inset at line " + str(i))
723 k = find_token(document.body, 'alignment', i, i+4)
727 alignment = get_value(document.body, "alignment", k)
728 if alignment == "document":
729 alignment = galignment
731 l = find_token(document.body, "\\begin_layout Plain Layout", i, j)
733 document.warning("Can't find float layout!")
736 if alignment == "left":
737 alcmd = put_cmd_in_ert("\\raggedright{}")
738 elif alignment == "center":
739 alcmd = put_cmd_in_ert("\\centering{}")
740 elif alignment == "right":
741 alcmd = put_cmd_in_ert("\\raggedleft{}")
743 document.body[l+1:l+1] = alcmd
# In tufte classes, replace citation insets with raw \cite-style ERT built
# from the inset's before/after/key parameters. Sampled listing: the lines
# initializing `res` from `cmd` and the guards around pre/post are missing.
# NOTE(review): the docstring's "\c" is an invalid escape in a plain string
# (DeprecationWarning on newer Pythons) — a raw string would silence it.
746 def revert_tuftecite(document):
747 " Revert \cite commands in tufte classes "
749 tufte = ["tufte-book", "tufte-handout"]
750 if document.textclass not in tufte:
755 i = find_token(document.body, "\\begin_inset CommandInset citation", i+1)
758 j = find_end_of_inset(document.body, i)
760 document.warning("Can't find end of citation inset at line %d!!" %(i))
762 k = find_token(document.body, "LatexCommand", i, j)
764 document.warning("Can't find LatexCommand for citation inset at line %d!" %(i))
767 cmd = get_value(document.body, "LatexCommand", k)
771 pre = get_quoted_value(document.body, "before", i, j)
772 post = get_quoted_value(document.body, "after", i, j)
773 key = get_quoted_value(document.body, "key", i, j)
775 document.warning("Citation inset at line %d does not have a key!" %(i))
777 # Replace command with ERT
780 res += "[" + pre + "]"
782 res += "[" + post + "]"
785 res += "{" + key + "}"
786 document.body[i:j+1] = put_cmd_in_ert([res])
# Drop the varwidth="true" attribute from all tabular columns, degrading
# tabularx/xltabular tables to normal tables in the older format.
790 def revert_stretchcolumn(document):
791 " We remove the column varwidth flags or everything else will become a mess. "
794 i = find_token(document.body, "\\begin_inset Tabular", i+1)
797 j = find_end_of_inset(document.body, i+1)
799 document.warning("Malformed LyX document: Could not find end of tabular.")
801 for k in range(i, j):
802 if re.search('^<column.*varwidth="[^"]+".*>$', document.body[k]):
803 document.warning("Converting 'tabularx'/'xltabular' table to normal table.")
804 document.body[k] = document.body[k].replace(' varwidth="true"', '')
# Emulate 2.4's automatic vertical columns in older formats: columns whose
# cells contain line breaks / multiple paragraphs get a special
# ">{...}V{\linewidth}" column spec (varwidth package) and their Newline
# insets are replaced by ERT \\ or \linebreak{}. Sampled listing: the loop
# headers over cells, the begcell/m advancement lines, and the
# needarray/needvarwidth flag-setting lines are not all visible here.
807 def revert_vcolumns(document):
808 " Revert standard columns with line breaks etc. "
814 i = find_token(document.body, "\\begin_inset Tabular", i+1)
817 j = find_end_of_inset(document.body, i)
819 document.warning("Malformed LyX document: Could not find end of tabular.")
822 # Collect necessary column information
# Row/column counts live in the quoted attributes of the <features> line.
824 nrows = int(document.body[i+1].split('"')[3])
825 ncols = int(document.body[i+1].split('"')[5])
827 for k in range(ncols):
828 m = find_token(document.body, "<column", m)
829 width = get_option_value(document.body[m], 'width')
830 varwidth = get_option_value(document.body[m], 'varwidth')
831 alignment = get_option_value(document.body[m], 'alignment')
832 special = get_option_value(document.body[m], 'special')
833 col_info.append([width, varwidth, alignment, special, m])
838 for row in range(nrows):
839 for col in range(ncols):
840 m = find_token(document.body, "<cell", m)
841 multicolumn = get_option_value(document.body[m], 'multicolumn')
842 multirow = get_option_value(document.body[m], 'multirow')
843 width = get_option_value(document.body[m], 'width')
844 rotate = get_option_value(document.body[m], 'rotate')
845 # Check for: linebreaks, multipars, non-standard environments
847 endcell = find_token(document.body, "</cell>", begcell)
849 if find_token(document.body, "\\begin_inset Newline", begcell, endcell) != -1:
851 elif count_pars_in_inset(document.body, begcell + 2) > 1:
853 elif get_value(document.body, "\\begin_layout", begcell) != "Plain Layout":
855 if vcand and rotate == "" and ((multicolumn == "" and multirow == "") or width == ""):
# Only plain columns (no width/varwidth/special yet) are rewritten.
856 if col_info[col][0] == "" and col_info[col][1] == "" and col_info[col][3] == "":
858 alignment = col_info[col][2]
859 col_line = col_info[col][4]
861 if alignment == "center":
862 vval = ">{\\centering}"
863 elif alignment == "left":
864 vval = ">{\\raggedright}"
865 elif alignment == "right":
866 vval = ">{\\raggedleft}"
869 vval += "V{\\linewidth}"
# Inject the computed spec as a special="..." attribute on the column.
871 document.body[col_line] = document.body[col_line][:-1] + " special=\"" + vval + "\">"
872 # ERT newlines and linebreaks (since LyX < 2.4 automatically inserts parboxes
873 # with newlines, and we do not want that)
875 endcell = find_token(document.body, "</cell>", begcell)
877 nl = find_token(document.body, "\\begin_inset Newline newline", begcell, endcell)
879 nl = find_token(document.body, "\\begin_inset Newline linebreak", begcell, endcell)
883 nle = find_end_of_inset(document.body, nl)
884 del(document.body[nle:nle+1])
886 document.body[nl:nl+1] = put_cmd_in_ert("\\linebreak{}")
888 document.body[nl:nl+1] = put_cmd_in_ert("\\\\")
894 if needarray == True:
895 add_to_preamble(document, ["\\usepackage{array}"])
896 if needvarwidth == True:
897 add_to_preamble(document, ["\\usepackage{varwidth}"])
# Revert the per-bibliography 'encoding' parameter: for biblatex engines add
# a bibencoding=... biblio option to the header; otherwise wrap the bibtex
# inset in \bgroup\inputencoding{...} ... \egroup ERT. The encodings dict
# maps LyX encoding names to their LaTeX (inputenc/bibencoding) names —
# most of its entries are missing from this sampled listing.
900 def revert_bibencoding(document):
901 " Revert bibliography encoding "
905 i = find_token(document.header, "\\cite_engine", 0)
907 document.warning("Malformed document! Missing \\cite_engine")
909 engine = get_value(document.header, "\\cite_engine", i)
913 if engine in ["biblatex", "biblatex-natbib"]:
916 # Map lyx to latex encoding names
920 "armscii8" : "armscii8",
921 "iso8859-1" : "latin1",
922 "iso8859-2" : "latin2",
923 "iso8859-3" : "latin3",
924 "iso8859-4" : "latin4",
925 "iso8859-5" : "iso88595",
926 "iso8859-6" : "8859-6",
927 "iso8859-7" : "iso-8859-7",
928 "iso8859-8" : "8859-8",
929 "iso8859-9" : "latin5",
930 "iso8859-13" : "latin7",
931 "iso8859-15" : "latin9",
932 "iso8859-16" : "latin10",
933 "applemac" : "applemac",
935 "cp437de" : "cp437de",
952 "utf8-platex" : "utf8",
959 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
962 j = find_end_of_inset(document.body, i)
964 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
966 encoding = get_quoted_value(document.body, "encoding", i, j)
969 # remove encoding line
970 k = find_token(document.body, "encoding", i, j)
973 if encoding == "default":
975 # Re-find inset end line
976 j = find_end_of_inset(document.body, i)
977 # biblatex branch: append to (or create) the \biblio_options header line.
979 h = find_token(document.header, "\\biblio_options", 0)
981 biblio_options = get_value(document.header, "\\biblio_options", h)
982 if not "bibencoding" in biblio_options:
983 document.header[h] += ",bibencoding=%s" % encodings[encoding]
985 bs = find_token(document.header, "\\biblatex_bibstyle", 0)
987 # this should not happen
988 document.warning("Malformed LyX document! No \\biblatex_bibstyle header found!")
990 document.header[bs-1 : bs-1] = ["\\biblio_options bibencoding=" + encodings[encoding]]
# Non-biblatex branch: scope an \inputencoding switch around the inset.
992 document.body[j+1:j+1] = put_cmd_in_ert("\\egroup")
993 document.body[i:i] = put_cmd_in_ert("\\bgroup\\inputencoding{" + encodings[encoding] + "}")
# Split version-control Info insets out of the generic buffer Info inset:
# type "buffer" + arg "vcs-*" becomes type "vcs" + the unprefixed arg.
999 def convert_vcsinfo(document):
1000 " Separate vcs Info inset from buffer Info inset. "
1003 "vcs-revision" : "revision",
1004 "vcs-tree-revision" : "tree-revision",
1005 "vcs-author" : "author",
1006 "vcs-time" : "time",
1011 i = find_token(document.body, "\\begin_inset Info", i+1)
1014 j = find_end_of_inset(document.body, i+1)
1016 document.warning("Malformed LyX document: Could not find end of Info inset.")
1018 tp = find_token(document.body, 'type', i, j)
1019 tpv = get_quoted_value(document.body, "type", tp)
1022 arg = find_token(document.body, 'arg', i, j)
1023 argv = get_quoted_value(document.body, "arg", arg)
1024 if argv not in list(types.keys())
1026 document.body[tp] = "type \"vcs\""
1027 document.body[arg] = "arg \"" + types[argv] + "\""
# Inverse of convert_vcsinfo: fold type "vcs" Info insets back into the
# buffer Info inset by re-prefixing the arg with "vcs-".
1030 def revert_vcsinfo(document):
1031 " Merge vcs Info inset to buffer Info inset. "
1033 args = ["revision", "tree-revision", "author", "time", "date" ]
1036 i = find_token(document.body, "\\begin_inset Info", i+1)
1039 j = find_end_of_inset(document.body, i+1)
1041 document.warning("Malformed LyX document: Could not find end of Info inset.")
1043 tp = find_token(document.body, 'type', i, j)
1044 tpv = get_quoted_value(document.body, "type", tp)
1047 arg = find_token(document.body, 'arg', i, j)
1048 argv = get_quoted_value(document.body, "arg", arg)
1049 if argv not in args:
1050 document.warning("Malformed Info inset. Invalid vcs arg.")
1052 document.body[tp] = "type \"buffer\""
1053 document.body[arg] = "arg \"vcs-" + argv + "\""
1056 def revert_dateinfo(document):
1057 " Revert date info insets to static text. "
1059 # FIXME This currently only considers the main language and uses the system locale
1060 # Ideally, it should honor context languages and switch the locale accordingly.
1062 # The date formats for each language using strftime syntax:
1063 # long, short, loclong, locmedium, locshort
1065 "afrikaans" : ["%A, %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1066 "albanian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1067 "american" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1068 "amharic" : ["%A ፣%d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1069 "ancientgreek" : ["%A, %d %B %Y", "%d %b %Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1070 "arabic_arabi" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
1071 "arabic_arabtex" : ["%A، %d %B، %Y", "%d/%m/%Y", "%d %B، %Y", "%d/%m/%Y", "%d/%m/%Y"],
1072 "armenian" : ["%Y թ. %B %d, %A", "%d.%m.%y", "%d %B، %Y", "%d %b، %Y", "%d/%m/%Y"],
1073 "asturian" : ["%A, %d %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
1074 "australian" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1075 "austrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1076 "bahasa" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1077 "bahasam" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1078 "basque" : ["%Y(e)ko %B %d, %A", "%y/%m/%d", "%Y %B %d", "%Y %b %d", "%Y/%m/%d"],
1079 "belarusian" : ["%A, %d %B %Y г.", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1080 "bosnian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%Y-%m-%d"],
1081 "brazilian" : ["%A, %d de %B de %Y", "%d/%m/%Y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
1082 "breton" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1083 "british" : ["%A, %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1084 "bulgarian" : ["%A, %d %B %Y г.", "%d.%m.%y г.", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1085 "canadian" : ["%A, %B %d, %Y", "%Y-%m-%d", "%B %d, %Y", "%d %b %Y", "%Y-%m-%d"],
1086 "canadien" : ["%A %d %B %Y", "%y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1087 "catalan" : ["%A, %d %B de %Y", "%d/%m/%y", "%d / %B / %Y", "%d / %b / %Y", "%d/%m/%Y"],
1088 "chinese-simplified" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y-%m-%d", "%y-%m-%d"],
1089 "chinese-traditional" : ["%Y年%m月%d日 %A", "%Y/%m/%d", "%Y年%m月%d日", "%Y年%m月%d日", "%y年%m月%d日"],
1090 "coptic" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1091 "croatian" : ["%A, %d. %B %Y.", "%d. %m. %Y.", "%d. %B %Y.", "%d. %b. %Y.", "%d.%m.%Y."],
1092 "czech" : ["%A %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b. %Y", "%d.%m.%Y"],
1093 "danish" : ["%A den %d. %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1094 "divehi" : ["%Y %B %d, %A", "%Y-%m-%d", "%Y %B %d", "%Y %b %d", "%d/%m/%Y"],
1095 "dutch" : ["%A %d %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1096 "english" : ["%A, %B %d, %Y", "%m/%d/%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1097 "esperanto" : ["%A, %d %B %Y", "%d %b %Y", "la %d de %B %Y", "la %d de %b %Y", "%m/%d/%Y"],
1098 "estonian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1099 "farsi" : ["%A %d %B %Y", "%Y/%m/%d", "%d %B %Y", "%d %b %Y", "%Y/%m/%d"],
1100 "finnish" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1101 "french" : ["%A %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1102 "friulan" : ["%A %d di %B dal %Y", "%d/%m/%y", "%d di %B dal %Y", "%d di %b dal %Y", "%d/%m/%Y"],
1103 "galician" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%d/%m/%Y"],
1104 "georgian" : ["%A, %d %B, %Y", "%d.%m.%y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1105 "german" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1106 "german-ch" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1107 "german-ch-old" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1108 "greek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1109 "hebrew" : ["%A, %d ב%B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1110 "hindi" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1111 "icelandic" : ["%A, %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1112 "interlingua" : ["%Y %B %d, %A", "%Y-%m-%d", "le %d de %B %Y", "le %d de %b %Y", "%Y-%m-%d"],
1113 "irish" : ["%A %d %B %Y", "%d/%m/%Y", "%d. %B %Y", "%d. %b %Y", "%d/%m/%Y"],
1114 "italian" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d/%b/%Y", "%d/%m/%Y"],
1115 "japanese" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1116 "japanese-cjk" : ["%Y年%m月%d日%A", "%Y/%m/%d", "%Y年%m月%d日", "%Y/%m/%d", "%y/%m/%d"],
1117 "kannada" : ["%A, %B %d, %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d-%m-%Y"],
1118 "kazakh" : ["%Y ж. %d %B, %A", "%d.%m.%y", "%d %B %Y", "%d %B %Y", "%Y-%d-%m"],
1119 "khmer" : ["%A %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1120 "korean" : ["%Y년 %m월 %d일 %A", "%y. %m. %d.", "%Y년 %m월 %d일", "%Y. %m. %d.", "%y. %m. %d."],
1121 "kurmanji" : ["%A, %d %B %Y", "%d %b %Y", "%d. %B %Y", "%d. %m. %Y", "%Y-%m-%d"],
1122 "lao" : ["%A ທີ %d %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %B %Y", "%d/%m/%Y"],
1123 "latin" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1124 "latvian" : ["%A, %Y. gada %d. %B", "%d.%m.%y", "%Y. gada %d. %B", "%Y. gada %d. %b", "%d.%m.%Y"],
1125 "lithuanian" : ["%Y m. %B %d d., %A", "%Y-%m-%d", "%Y m. %B %d d.", "%Y m. %B %d d.", "%Y-%m-%d"],
1126 "lowersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1127 "macedonian" : ["%A, %d %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1128 "magyar" : ["%Y. %B %d., %A", "%Y. %m. %d.", "%Y. %B %d.", "%Y. %b %d.", "%Y.%m.%d."],
1129 "malayalam" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1130 "marathi" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1131 "mongolian" : ["%A, %Y оны %m сарын %d", "%Y-%m-%d", "%Y оны %m сарын %d", "%d-%m-%Y", "%d-%m-%Y"],
1132 "naustrian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1133 "newzealand" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1134 "ngerman" : ["%A, %d. %B %Y", "%d.%m.%y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1135 "norsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1136 "nynorsk" : ["%A %d. %B %Y", "%d.%m.%Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1137 "occitan" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1138 "piedmontese" : ["%A, %d %B %Y", "%d %b %Y", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1139 "polish" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1140 "polutonikogreek" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1141 "portuguese" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B de %Y", "%d de %b de %Y", "%Y/%m/%d"],
1142 "romanian" : ["%A, %d %B %Y", "%d.%m.%Y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1143 "romansh" : ["%A, ils %d da %B %Y", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1144 "russian" : ["%A, %d %B %Y г.", "%d.%m.%Y", "%d %B %Y г.", "%d %b %Y г.", "%d.%m.%Y"],
1145 "samin" : ["%Y %B %d, %A", "%Y-%m-%d", "%B %d. b. %Y", "%b %d. b. %Y", "%d.%m.%Y"],
1146 "sanskrit" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1147 "scottish" : ["%A, %dmh %B %Y", "%d/%m/%Y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1148 "serbian" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1149 "serbian-latin" : ["%A, %d. %B %Y.", "%d.%m.%y.", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1150 "slovak" : ["%A, %d. %B %Y", "%d. %m. %Y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1151 "slovene" : ["%A, %d. %B %Y", "%d. %m. %y", "%d. %B %Y", "%d. %b %Y", "%d.%m.%Y"],
1152 "spanish" : ["%A, %d de %B de %Y", "%d/%m/%y", "%d de %B %de %Y", "%d %b %Y", "%d/%m/%Y"],
1153 "spanish-mexico" : ["%A, %d de %B %de %Y", "%d/%m/%y", "%d de %B de %Y", "%d %b %Y", "%d/%m/%Y"],
1154 "swedish" : ["%A %d %B %Y", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%Y-%m-%d"],
1155 "syriac" : ["%Y %B %d, %A", "%Y-%m-%d", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1156 "tamil" : ["%A, %d %B, %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1157 "telugu" : ["%d, %B %Y, %A", "%d-%m-%y", "%d %B %Y", "%d %b %Y", "%d-%m-%Y"],
1158 "thai" : ["%Aที่ %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1159 "tibetan" : ["%Y %Bའི་ཚེས་%d, %A", "%Y-%m-%d", "%B %d, %Y", "%b %d, %Y", "%m/%d/%Y"],
1160 "turkish" : ["%d %B %Y %A", "%d.%m.%Y", "%d %B %Y", "%d.%b.%Y", "%d.%m.%Y"],
1161 "turkmen" : ["%d %B %Y %A", "%d.%m.%Y", "%Y ý. %B %d", "%d.%m.%Y ý.", "%d.%m.%y ý."],
1162 "ukrainian" : ["%A, %d %B %Y р.", "%d.%m.%y", "%d %B %Y", "%d %m %Y", "%d.%m.%Y"],
1163 "uppersorbian" : ["%A, %d. %B %Y", "%d.%m.%y", "%d %B %Y", "%d %b %Y", "%d.%m.%Y"],
1164 "urdu" : ["%A، %d %B، %Y", "%d/%m/%y", "%d %B, %Y", "%d %b %Y", "%d/%m/%Y"],
1165 "vietnamese" : ["%A, %d %B, %Y", "%d/%m/%Y", "%d tháng %B %Y", "%d-%m-%Y", "%d/%m/%Y"],
1166 "welsh" : ["%A, %d %B %Y", "%d/%m/%y", "%d %B %Y", "%d %b %Y", "%d/%m/%Y"],
1169 types = ["date", "fixdate", "moddate" ]
1170 lang = get_value(document.header, "\\language")
1172 document.warning("Malformed LyX document! No \\language header found!")
1177 i = find_token(document.body, "\\begin_inset Info", i+1)
1180 j = find_end_of_inset(document.body, i+1)
1182 document.warning("Malformed LyX document: Could not find end of Info inset.")
1184 tp = find_token(document.body, 'type', i, j)
1185 tpv = get_quoted_value(document.body, "type", tp)
1186 if tpv not in types:
1188 arg = find_token(document.body, 'arg', i, j)
1189 argv = get_quoted_value(document.body, "arg", arg)
1192 if tpv == "fixdate":
1193 datecomps = argv.split('@')
1194 if len(datecomps) > 1:
1196 isodate = datecomps[1]
1197 m = re.search('(\d\d\d\d)-(\d\d)-(\d\d)', isodate)
1199 dte = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
1200 # FIXME if we had the path to the original document (not the one in the tmp dir),
1201 # we could use the mtime.
1202 # elif tpv == "moddate":
1203 # dte = date.fromtimestamp(os.path.getmtime(document.dir))
1206 result = dte.isodate()
1207 elif argv == "long":
1208 result = dte.strftime(dateformats[lang][0])
1209 elif argv == "short":
1210 result = dte.strftime(dateformats[lang][1])
1211 elif argv == "loclong":
1212 result = dte.strftime(dateformats[lang][2])
1213 elif argv == "locmedium":
1214 result = dte.strftime(dateformats[lang][3])
1215 elif argv == "locshort":
1216 result = dte.strftime(dateformats[lang][4])
1218 fmt = argv.replace("MMMM", "%b").replace("MMM", "%b").replace("MM", "%m").replace("M", "%m")
1219 fmt = fmt.replace("yyyy", "%Y").replace("yy", "%y")
1220 fmt = fmt.replace("dddd", "%A").replace("ddd", "%a").replace("dd", "%d")
1221 fmt = re.sub('[^\'%]d', '%d', fmt)
1222 fmt = fmt.replace("'", "")
1223 result = dte.strftime(fmt)
1224 if sys.version_info < (3,0):
1225 # In Python 2, datetime module works with binary strings,
1226 # our dateformat strings are utf8-encoded:
1227 result = result.decode('utf-8')
1228 document.body[i : j+1] = [result]
def revert_timeinfo(document):
    """Revert time Info insets to static text.

    Info insets of type "time", "fixtime" and "modtime" are replaced by the
    formatted time string they would display.  A "fixtime" inset carries an
    ISO time after an '@' separator in its argument; the other types fall
    back to the current time.

    Fixes relative to the previous revision:
    - the custom-format branch used the undefined name ``dte`` (NameError);
      it now uses ``tme`` like every other branch;
    - the replacement is spliced in as a one-element list (a bare string in
      a list-slice assignment is inserted character by character), matching
      revert_date.
    """
    # FIXME This currently only considers the main language and uses the system locale
    # Ideally, it should honor context languages and switch the locale accordingly.
    # Also, the time object is "naive", i.e., it does not know of timezones (%Z will
    # be empty).

    # The time formats for each language using strftime syntax:
    # [long, short]
    timeformats = {
        "afrikaans" : ["%H:%M:%S %Z", "%H:%M"],
        "albanian" : ["%I:%M:%S %p, %Z", "%I:%M %p"],
        "american" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "amharic" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ancientgreek" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "arabic_arabi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "arabic_arabtex" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "armenian" : ["%H:%M:%S %Z", "%H:%M"],
        "asturian" : ["%H:%M:%S %Z", "%H:%M"],
        "australian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "austrian" : ["%H:%M:%S %Z", "%H:%M"],
        "bahasa" : ["%H.%M.%S %Z", "%H.%M"],
        "bahasam" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "basque" : ["%H:%M:%S (%Z)", "%H:%M"],
        "belarusian" : ["%H:%M:%S, %Z", "%H:%M"],
        "bosnian" : ["%H:%M:%S %Z", "%H:%M"],
        "brazilian" : ["%H:%M:%S %Z", "%H:%M"],
        "breton" : ["%H:%M:%S %Z", "%H:%M"],
        "british" : ["%H:%M:%S %Z", "%H:%M"],
        "bulgarian" : ["%H:%M:%S %Z", "%H:%M"],
        "canadian" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "canadien" : ["%H:%M:%S %Z", "%H h %M"],
        "catalan" : ["%H:%M:%S %Z", "%H:%M"],
        "chinese-simplified" : ["%Z %p%I:%M:%S", "%p%I:%M"],
        "chinese-traditional" : ["%p%I:%M:%S [%Z]", "%p%I:%M"],
        "coptic" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "croatian" : ["%H:%M:%S (%Z)", "%H:%M"],
        "czech" : ["%H:%M:%S %Z", "%H:%M"],
        "danish" : ["%H.%M.%S %Z", "%H.%M"],
        "divehi" : ["%H:%M:%S %Z", "%H:%M"],
        "dutch" : ["%H:%M:%S %Z", "%H:%M"],
        "english" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "esperanto" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "estonian" : ["%H:%M:%S %Z", "%H:%M"],
        "farsi" : ["%H:%M:%S (%Z)", "%H:%M"],
        "finnish" : ["%H.%M.%S %Z", "%H.%M"],
        "french" : ["%H:%M:%S %Z", "%H:%M"],
        "friulan" : ["%H:%M:%S %Z", "%H:%M"],
        "galician" : ["%H:%M:%S %Z", "%H:%M"],
        "georgian" : ["%H:%M:%S %Z", "%H:%M"],
        "german" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch" : ["%H:%M:%S %Z", "%H:%M"],
        "german-ch-old" : ["%H:%M:%S %Z", "%H:%M"],
        "greek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "hebrew" : ["%H:%M:%S %Z", "%H:%M"],
        "hindi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "icelandic" : ["%H:%M:%S %Z", "%H:%M"],
        "interlingua" : ["%H:%M:%S %Z", "%H:%M"],
        "irish" : ["%H:%M:%S %Z", "%H:%M"],
        "italian" : ["%H:%M:%S %Z", "%H:%M"],
        "japanese" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "japanese-cjk" : ["%H時%M分%S秒 %Z", "%H:%M"],
        "kannada" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "kazakh" : ["%H:%M:%S %Z", "%H:%M"],
        "khmer" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "korean" : ["%p %I시%M분 %S초 %Z", "%p %I:%M"],
        "kurmanji" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "lao" : ["%H ໂມງ%M ນາທີ %S ວິນາທີ %Z", "%H:%M"],
        "latin" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "latvian" : ["%H:%M:%S %Z", "%H:%M"],
        "lithuanian" : ["%H:%M:%S %Z", "%H:%M"],
        "lowersorbian" : ["%H:%M:%S %Z", "%H:%M"],
        "macedonian" : ["%H:%M:%S %Z", "%H:%M"],
        "magyar" : ["%H:%M:%S %Z", "%H:%M"],
        "malayalam" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "marathi" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "mongolian" : ["%H:%M:%S %Z", "%H:%M"],
        "naustrian" : ["%H:%M:%S %Z", "%H:%M"],
        "newzealand" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "ngerman" : ["%H:%M:%S %Z", "%H:%M"],
        "norsk" : ["%H:%M:%S %Z", "%H:%M"],
        "nynorsk" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "occitan" : ["%H:%M:%S %Z", "%H:%M"],
        "piedmontese" : ["%H:%M:%S %Z", "%H:%M:%S"],
        "polish" : ["%H:%M:%S %Z", "%H:%M"],
        "polutonikogreek" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "portuguese" : ["%H:%M:%S %Z", "%H:%M"],
        "romanian" : ["%H:%M:%S %Z", "%H:%M"],
        "romansh" : ["%H:%M:%S %Z", "%H:%M"],
        "russian" : ["%H:%M:%S %Z", "%H:%M"],
        "samin" : ["%H:%M:%S %Z", "%H:%M"],
        "sanskrit" : ["%H:%M:%S %Z", "%H:%M"],
        "scottish" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian" : ["%H:%M:%S %Z", "%H:%M"],
        "serbian-latin" : ["%H:%M:%S %Z", "%H:%M"],
        "slovak" : ["%H:%M:%S %Z", "%H:%M"],
        "slovene" : ["%H:%M:%S %Z", "%H:%M"],
        "spanish" : ["%H:%M:%S (%Z)", "%H:%M"],
        "spanish-mexico" : ["%H:%M:%S %Z", "%H:%M"],
        "swedish" : ["kl. %H:%M:%S %Z", "%H:%M"],
        "syriac" : ["%H:%M:%S %Z", "%H:%M"],
        "tamil" : ["%p %I:%M:%S %Z", "%p %I:%M"],
        "telugu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "thai" : ["%H นาฬิกา %M นาที %S วินาที %Z", "%H:%M"],
        "tibetan" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "turkish" : ["%H:%M:%S %Z", "%H:%M"],
        "turkmen" : ["%H:%M:%S %Z", "%H:%M"],
        "ukrainian" : ["%H:%M:%S %Z", "%H:%M"],
        "uppersorbian" : ["%H:%M:%S %Z", "%H:%M hodź."],
        "urdu" : ["%I:%M:%S %p %Z", "%I:%M %p"],
        "vietnamese" : ["%H:%M:%S %Z", "%H:%M"],
        "welsh" : ["%H:%M:%S %Z", "%H:%M"]
        }

    types = ["time", "fixtime", "modtime" ]
    i = find_token(document.header, "\\language", 0)
    if i == -1:
        # this should not happen
        document.warning("Malformed LyX document! No \\language header found!")
        return
    lang = get_value(document.header, "\\language", i)

    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv not in types:
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # Default to the current time; "fixtime" may override it below.
        dtme = datetime.now()
        tme = dtme.time()
        if tpv == "fixtime":
            # The argument has the form "<format>@<ISO time>".
            timecomps = argv.split('@')
            if len(timecomps) > 1:
                argv = timecomps[0]
                isotime = timecomps[1]
                m = re.search(r'(\d\d):(\d\d):(\d\d)', isotime)
                if m:
                    tme = time(int(m.group(1)), int(m.group(2)), int(m.group(3)))
                else:
                    m = re.search(r'(\d\d):(\d\d)', isotime)
                    if m:
                        tme = time(int(m.group(1)), int(m.group(2)))
# FIXME if we had the path to the original document (not the one in the tmp dir),
# we could use the mtime.
#        elif tpv == "moddate":
#            dte = date.fromtimestamp(os.path.getmtime(document.dir))
        if argv == "ISO":
            result = tme.isoformat()
        elif argv == "long":
            result = tme.strftime(timeformats[lang][0])
        elif argv == "short":
            result = tme.strftime(timeformats[lang][1])
        else:
            # Translate Qt-style format placeholders to strftime directives.
            fmt = argv.replace("HH", "%H").replace("H", "%H").replace("hh", "%I").replace("h", "%I")
            fmt = fmt.replace("mm", "%M").replace("m", "%M").replace("ss", "%S").replace("s", "%S")
            fmt = fmt.replace("zzz", "%f").replace("z", "%f").replace("t", "%Z")
            fmt = fmt.replace("AP", "%p").replace("ap", "%p").replace("A", "%p").replace("a", "%p")
            fmt = fmt.replace("'", "")
            # was: dte.strftime(fmt) -- `dte` is undefined here (NameError)
            result = tme.strftime(fmt)
        # Splice as a one-element list; a bare string would be inserted
        # character by character.
        document.body[i : j+1] = [result]
def revert_namenoextinfo(document):
    " Merge buffer Info inset type name-noext to name. "
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        # Only buffer-type Info insets carry the deprecated "name-noext" arg.
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "buffer":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "name-noext":
            continue
        # Downgrade the argument in place; the inset itself is kept.
        document.body[arg] = "arg \"name\""
def revert_l7ninfo(document):
    """Revert l7n (localization) Info insets to their plain text.

    Fix: splice the replacement in as a one-element list; slice-assigning a
    bare string inserts it character by character.
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "l7n":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        # remove trailing colons, menu accelerator (|...) and qt accelerator (&), while keeping literal " & "
        argv = argv.rstrip(':').split('|')[0].replace(" & ", "</amp;>").replace("&", "").replace("</amp;>", " & ")
        document.body[i : j+1] = [argv]
# --- revert_listpargs -------------------------------------------------------
# Reverts "listpreamble" Argument insets to TeX code: the argument's plain
# content is wrapped in a collapsed ERT inset ("{" ... "}") and re-inserted at
# the beginning of the containing paragraph.
# NOTE(review): this chunk is a partial, line-numbered dump; loop headers and
# "== -1" guard lines are elided between the numbered lines below, and
# `parbeg` is assigned from `parent` on an elided line — confirm against the
# complete file.
1452 def revert_listpargs(document):
1453 " Reverts listpreamble arguments to TeX-code "
1456 i = find_token(document.body, "\\begin_inset Argument listpreamble:", i+1)
1459 j = find_end_of_inset(document.body, i)
1460 # Find containing paragraph layout
1461 parent = get_containing_layout(document.body, i)
1463 document.warning("Malformed LyX document: Can't find parent paragraph layout")
# Extract the argument's content from its inner Plain Layout.
1466 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
1467 endPlain = find_end_of_layout(document.body, beginPlain)
1468 content = document.body[beginPlain + 1 : endPlain]
# Remove the Argument inset, then re-insert the content as ERT at the
# paragraph start (parbeg).
1469 del document.body[i:j+1]
1470 subst = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout",
1471 "{"] + content + ["}", "\\end_layout", "", "\\end_inset", ""]
1472 document.body[parbeg : parbeg] = subst
def revert_lformatinfo(document):
    """Revert "layoutformat" LyX Info insets to the literal format number.

    Fix: splice the replacement in as a one-element list ["69"]; a bare
    string in a list-slice assignment would insert the two lines "6" and "9".
    """
    i = 0
    while True:
        i = find_token(document.body, "\\begin_inset Info", i+1)
        if i == -1:
            return
        j = find_end_of_inset(document.body, i+1)
        if j == -1:
            document.warning("Malformed LyX document: Could not find end of Info inset.")
            continue
        tp = find_token(document.body, 'type', i, j)
        tpv = get_quoted_value(document.body, "type", tp)
        if tpv != "lyxinfo":
            continue
        arg = find_token(document.body, 'arg', i, j)
        argv = get_quoted_value(document.body, "arg", arg)
        if argv != "layoutformat":
            continue
        # hardcoded for now: 69 is the layout format of the target version
        document.body[i : j+1] = ["69"]
def convert_hebrew_parentheses(document):
    """Swap opening/closing parentheses in Hebrew text.

    Up to LyX 2.4, "(" was used as closing parenthesis and
    ")" as opening parenthesis for Hebrew in the LyX source.
    """
    # Track the language of each nested layout; the document language is the
    # outermost default.
    current_languages = [document.language]
    for i, line in enumerate(document.body):
        if line.startswith('\\lang '):
            # NB: str.lstrip() strips a *character set*, which would mangle
            # language names such as "ngerman"; slice off the prefix instead.
            current_languages[-1] = line[len('\\lang '):]
        elif line.startswith('\\begin_layout'):
            # A nested layout starts out inheriting the current language.
            current_languages.append(current_languages[-1])
        elif line.startswith('\\end_layout'):
            current_languages.pop()
        elif current_languages[-1] == 'hebrew' and not line.startswith('\\'):
            # Swap ( and ) using a NUL byte as placeholder.
            document.body[i] = line.replace('(','\x00').replace(')','(').replace('\x00',')')
def revert_hebrew_parentheses(document):
    """Store parentheses in Hebrew text reversed (pre-2.4 convention).

    The parenthesis swap is an involution, so reverting is the same
    operation as converting; this wrapper only exists to keep the
    convert/revert naming convention.
    """
    convert_hebrew_parentheses(document)
def revert_malayalam(document):
    """Set the document language to English but assure Malayalam output.

    Older formats have no native Malayalam support, so the language is
    reverted via the generic revert_language helper.
    """
    revert_language(document, "malayalam", "", "malayalam")
# --- revert_soul ------------------------------------------------------------
# Reverts the soul module's flex insets (letter spacing, strikethrough,
# underline, highlight, capitalize) to raw LaTeX commands, loading the
# required packages in the user preamble first.
# NOTE(review): this numbered dump elides the for-loop header over `flexes`
# and the "if i != -1" guards around the add_to_preamble calls.
1531 def revert_soul(document):
1532 " Revert soul module flex insets to ERT "
1534 flexes = ["Spaceletters", "Strikethrough", "Underline", "Highlight", "Capitalize"]
# soul.sty is needed if any of the flex insets occurs in the body.
1537 i = find_token(document.body, "\\begin_inset Flex %s" % flex, 0)
1539 add_to_preamble(document, ["\\usepackage{soul}"])
# \hl additionally needs the color package.
1541 i = find_token(document.body, "\\begin_inset Flex Highlight", 0)
1543 add_to_preamble(document, ["\\usepackage{color}"])
# Replace each flex inset by the corresponding soul command in ERT.
1545 revert_flex_inset(document.body, "Spaceletters", "\\so")
1546 revert_flex_inset(document.body, "Strikethrough", "\\st")
1547 revert_flex_inset(document.body, "Underline", "\\ul")
1548 revert_flex_inset(document.body, "Highlight", "\\hl")
1549 revert_flex_inset(document.body, "Capitalize", "\\caps")
# --- revert_tablestyle ------------------------------------------------------
# Drops the \tablestyle parameter from the document header (the setting has
# no equivalent in the older format).
# NOTE(review): the "if i != -1" guard around the deletion is elided in this
# numbered dump.
1552 def revert_tablestyle(document):
1553 " Remove tablestyle params "
1556 i = find_token(document.header, "\\tablestyle")
1558 del document.header[i]
# --- revert_bibfileencodings ------------------------------------------------
# Reverts per-bibliography-file encodings (Biblatex only): each bibtex inset
# with a "file_encodings" option is turned into explicit \addbibresource
# preamble lines (with bibencoding=... where given), an ERT
# \printbibliography in the body, and the original inset is wrapped in a
# Note so no information is lost.
# NOTE(review): this numbered dump elides many lines (guards, loop headers,
# the opening/closing of the encoding dict, several cp* entries, and the
# encmap initialization); the numbered lines are kept verbatim.
1561 def revert_bibfileencodings(document):
1562 " Revert individual Biblatex bibliography encodings "
# Only biblatex(-natbib) supports per-file encodings; bail out otherwise.
1566 i = find_token(document.header, "\\cite_engine", 0)
1568 document.warning("Malformed document! Missing \\cite_engine")
1570 engine = get_value(document.header, "\\cite_engine", i)
1574 if engine in ["biblatex", "biblatex-natbib"]:
1577 # Map lyx to latex encoding names
1581 "armscii8" : "armscii8",
1582 "iso8859-1" : "latin1",
1583 "iso8859-2" : "latin2",
1584 "iso8859-3" : "latin3",
1585 "iso8859-4" : "latin4",
1586 "iso8859-5" : "iso88595",
1587 "iso8859-6" : "8859-6",
1588 "iso8859-7" : "iso-8859-7",
1589 "iso8859-8" : "8859-8",
1590 "iso8859-9" : "latin5",
1591 "iso8859-13" : "latin7",
1592 "iso8859-15" : "latin9",
1593 "iso8859-16" : "latin10",
1594 "applemac" : "applemac",
1596 "cp437de" : "cp437de",
1604 "cp1250" : "cp1250",
1605 "cp1251" : "cp1251",
1606 "cp1252" : "cp1252",
1607 "cp1255" : "cp1255",
1608 "cp1256" : "cp1256",
1609 "cp1257" : "cp1257",
1610 "koi8-r" : "koi8-r",
1611 "koi8-u" : "koi8-u",
1613 "utf8-platex" : "utf8",
# Process every bibtex CommandInset in the body.
1620 i = find_token(document.body, "\\begin_inset CommandInset bibtex", i+1)
1623 j = find_end_of_inset(document.body, i)
1625 document.warning("Can't find end of bibtex inset at line %d!!" %(i))
1627 encodings = get_quoted_value(document.body, "file_encodings", i, j)
1631 bibfiles = get_quoted_value(document.body, "bibfiles", i, j).split(",")
1632 opts = get_quoted_value(document.body, "biblatexopts", i, j)
1633 if len(bibfiles) == 0:
1634 document.warning("Bibtex inset at line %d does not have a bibfile!" %(i))
1635 # remove encoding line
1636 k = find_token(document.body, "file_encodings", i, j)
1638 del document.body[k]
1639 # Re-find inset end line
1640 j = find_end_of_inset(document.body, i)
# "file_encodings" is a tab-separated list of "<bibfile> <encoding>" pairs;
# build a bibfile -> encoding map (encmap) from it.
1642 enclist = encodings.split("\t")
1645 ppp = pp.split(" ", 1)
1646 encmap[ppp[0]] = ppp[1]
# Emit one \addbibresource per bibfile, with bibencoding where known.
1647 for bib in bibfiles:
1648 pr = "\\addbibresource"
1649 if bib in encmap.keys():
1650 pr += "[bibencoding=" + encmap[bib] + "]"
1651 pr += "{" + bib + "}"
1652 add_to_preamble(document, [pr])
1653 # Insert ERT \\printbibliography and wrap bibtex inset to a Note
1654 pcmd = "printbibliography"
1656 pcmd += "[" + opts + "]"
1657 repl = ["\\begin_inset ERT", "status open", "", "\\begin_layout Plain Layout",\
1658 "", "", "\\backslash", pcmd, "\\end_layout", "", "\\end_inset", "", "",\
1659 "\\end_layout", "", "\\begin_layout Standard", "\\begin_inset Note Note",\
1660 "status open", "", "\\begin_layout Plain Layout" ]
1661 repl += document.body[i:j+1]
1662 repl += ["", "\\end_layout", "", "\\end_inset", "", ""]
1663 document.body[i:j+1] = repl
# --- revert_cmidruletrimming ------------------------------------------------
# Strips the (bottom|top)line(l|r)trim attributes from table <cell> tags,
# since older formats do not know \cmidrule trimming.
# NOTE(review): this numbered dump elides the while-loop header, the
# "i == -1" break and the "j == -1" continue guards.
1669 def revert_cmidruletrimming(document):
1670 " Remove \\cmidrule trimming "
1672 # FIXME: Revert to TeX code?
1675 # first, let's find out if we need to do anything
1676 i = find_token(document.body, '<cell ', i+1)
# Cheap substring test before running the regex substitution.
1679 j = document.body[i].find('trim="')
1682 rgx = re.compile(r' (bottom|top)line[lr]trim="true"')
1683 # remove trim option
1684 document.body[i] = rgx.sub('', document.body[i])
1688 r'### Inserted by lyx2lyx (ruby inset) ###',
1689 r'InsetLayout Flex:Ruby',
1690 r' LyxType charstyle',
1691 r' LatexType command',
1695 r' HTMLInnerTag rb',
1696 r' HTMLInnerAttr ""',
1698 r' LabelString "Ruby"',
1699 r' Decoration Conglomerate',
1701 r' \ifdefined\kanjiskip',
1702 r' \IfFileExists{okumacro.sty}{\usepackage{okumacro}}{}',
1703 r' \else \ifdefined\luatexversion',
1704 r' \usepackage{luatexja-ruby}',
1705 r' \else \ifdefined\XeTeXversion',
1706 r' \usepackage{ruby}%',
1708 r' \providecommand{\ruby}[2]{\shortstack{\tiny #2\\#1}}',
1710 r' Argument post:1',
1711 r' LabelString "ruby text"',
1712 r' MenuString "Ruby Text|R"',
1713 r' Tooltip "Reading aid (ruby, furigana) for Chinese characters."',
1714 r' Decoration Conglomerate',
def convert_ruby_module(document):
    """Use the ruby module instead of a local layout definition.

    The module is added only when the matching local layout was actually
    present (and has been removed).
    """
    had_local_def = document.del_local_layout(ruby_inset_def)
    if had_local_def:
        document.add_module("ruby")
def revert_ruby_module(document):
    """Replace the ruby module with the equivalent local layout definition.

    The local layout is appended only when the module was actually in use
    (and has been removed).
    """
    module_was_used = document.del_module("ruby")
    if module_was_used:
        document.append_local_layout(ruby_inset_def)
def convert_utf8_japanese(document):
    """Use generic utf8 with Japanese documents.

    Replaces the Japanese-specific input encodings (utf8-platex for
    "japanese", utf8-cjk for "japanese-cjk") by plain "utf8".
    """
    lang = get_value(document.header, "\\language")
    if not lang.startswith("japanese"):
        return
    inputenc = get_value(document.header, "\\inputencoding")
    japanese_utf8 = {"japanese": "utf8-platex", "japanese-cjk": "utf8-cjk"}
    if japanese_utf8.get(lang) == inputenc:
        document.set_parameter("inputencoding", "utf8")
def revert_utf8_japanese(document):
    """Use Japanese utf8 variants with Japanese documents.

    Restores utf8-platex for "japanese" and utf8-cjk for "japanese-cjk"
    when the document currently uses generic utf8.
    """
    inputenc = get_value(document.header, "\\inputencoding")
    if inputenc != "utf8":
        return
    lang = get_value(document.header, "\\language")
    variants = {"japanese": "utf8-platex", "japanese-cjk": "utf8-cjk"}
    if lang in variants:
        document.set_parameter("inputencoding", variants[lang])
# --- revert_lineno ----------------------------------------------------------
# Replaces the native \use_lineno / \lineno_options header settings with an
# explicit \usepackage[...]{lineno} + \linenumbers block in the user
# preamble.
# NOTE(review): this numbered dump elides the continuation of the
# get_quoted_value call (its delete argument), the early return when lineno
# is unused, the "if options:" guard, and the closing of the
# add_to_preamble call.
1759 def revert_lineno(document):
1760 " Replace lineno setting with user-preamble code."
1762 options = get_quoted_value(document.header, "\\lineno_options",
# Header settings are deleted as they are read (delete=True).
1764 if not get_bool_value(document.header, "\\use_lineno", delete=True):
1767 options = "[" + options + "]"
1768 add_to_preamble(document, ["\\usepackage%s{lineno}" % options,
# --- convert_lineno ---------------------------------------------------------
# Inverse of revert_lineno: detects a \usepackage[...]{lineno} +
# \linenumbers pair in the user preamble, removes it (including a possible
# "% Added by lyx2lyx" marker), and writes native \use_lineno (and
# \lineno_options when package options were present) into the header just
# before \index.
# NOTE(review): this numbered dump elides the assignment of `use_lineno`,
# the guards around the preamble match, and the if/else branching between
# the two header[k:k] insertions.
1771 def convert_lineno(document):
1772 " Replace user-preamble code with native lineno support."
1775 i = find_token(document.preamble, "\\linenumbers", 1)
# \linenumbers must directly follow the \usepackage{lineno} line.
1777 usepkg = re.match(r"\\usepackage(.*){lineno}", document.preamble[i-1])
1780 options = usepkg.group(1).strip("[]")
1781 del(document.preamble[i-1:i+1])
1782 del_token(document.preamble, "% Added by lyx2lyx", i-2, i-1)
# Insert the native header settings before the \index block.
1784 k = find_token(document.header, "\\index ")
1786 document.header[k:k] = ["\\use_lineno %d" % use_lineno]
1788 document.header[k:k] = ["\\use_lineno %d" % use_lineno,
1789 "\\lineno_options %s" % options]
def revert_new_languages(document):
    """Emulate support for Azerbaijani, Bengali, Church Slavonic, Korean,
    and Russian (Petrine orthography).

    Fixes relative to the previous revision:
    - when a \\lang switch with a new language is found in the body, record
      that language (not document.language, which may not be a new language
      at all and would later raise a KeyError);
    - the final loop called the undefined name ``revert``; it now calls
      revert_language with the document as first argument.
    """
    # lyxname: (babelname, polyglossianame)
    new_languages = {"azerbaijani": ("azerbaijani", ""),
                     "bengali": ("", "bengali"),
                     "churchslavonic": ("", "churchslavonic"),
                     "oldrussian": ("", "russian"),
                     "korean": ("", "korean"),
                    }
    # Collect the new languages actually used: the main document language
    # and any \lang switches in the body.
    used_languages = set()
    if document.language in new_languages:
        used_languages.add(document.language)
    i = 0
    while True:
        i = find_token(document.body, "\\lang", i+1)
        if i == -1:
            break
        # "\lang " is 6 characters; the rest of the line is the language.
        lang_found = document.body[i][6:].strip()
        if lang_found in new_languages:
            used_languages.add(lang_found)

    # Korean is already supported via CJK, so leave as-is for Babel
    if ("korean" in used_languages
        and get_bool_value(document.header, "\\use_non_tex_fonts")
        and get_value(document.header, "\\language_package") in ("default", "auto")):
        revert_language(document, "korean", "", "korean")
    used_languages.discard("korean")

    for lang in used_languages:
        revert_language(document, lang, *new_languages[lang])
1826 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1827 r'InsetLayout Flex:Glosse',
1829 r' LabelString "Gloss (old version)"',
1830 r' MenuString "Gloss (old version)"',
1831 r' LatexType environment',
1832 r' LatexName linggloss',
1833 r' Decoration minimalistic',
1838 r' CustomPars false',
1839 r' ForcePlain true',
1840 r' ParbreakIsNewline true',
1841 r' FreeSpacing true',
1842 r' Requires covington',
1845 r' \@ifundefined{linggloss}{%',
1846 r' \newenvironment{linggloss}[2][]{',
1847 r' \def\glosstr{\glt #1}%',
1849 r' {\glosstr\glend}}{}',
1852 r' ResetsFont true',
1854 r' Decoration conglomerate',
1855 r' LabelString "Translation"',
1856 r' MenuString "Glosse Translation|s"',
1857 r' Tooltip "Add a translation for the glosse"',
# Local-layout definition emulating the deprecated Flex:Tri-Glosse inset via
# the covington-based "lingglosss" environment (consumed by
# convert_linggloss / revert_linggloss below).
# NOTE(review): this numbered dump elides several lines of the definition,
# including the closing bracket of the list literal — confirm against the
# complete file.
1862 glosss_inset_def = [
1863 r'### Inserted by lyx2lyx (deprecated ling glosses) ###',
1864 r'InsetLayout Flex:Tri-Glosse',
1866 r' LabelString "Tri-Gloss (old version)"',
1867 r' MenuString "Tri-Gloss (old version)"',
1868 r' LatexType environment',
1869 r' LatexName lingglosss',
1870 r' Decoration minimalistic',
1875 r' CustomPars false',
1876 r' ForcePlain true',
1877 r' ParbreakIsNewline true',
1878 r' FreeSpacing true',
1880 r' Requires covington',
1883 r' \@ifundefined{lingglosss}{%',
1884 r' \newenvironment{lingglosss}[2][]{',
1885 r' \def\glosstr{\glt #1}%',
1887 r' {\glosstr\glend}}{}',
1889 r' ResetsFont true',
1891 r' Decoration conglomerate',
1892 r' LabelString "Translation"',
1893 r' MenuString "Glosse Translation|s"',
1894 r' Tooltip "Add a translation for the glosse"',
def convert_linggloss(document):
    """Move old ling glosses to a local layout.

    If the body still uses the deprecated Flex Glosse / Flex Tri-Glosse
    insets, append the corresponding legacy inset definition to the
    document's local layout so the document keeps compiling.
    """
    legacy_glosses = (('\\begin_inset Flex Glosse', gloss_inset_def),
                      ('\\begin_inset Flex Tri-Glosse', glosss_inset_def))
    for inset_token, layout_def in legacy_glosses:
        if find_token(document.body, inset_token, 0) != -1:
            document.append_local_layout(layout_def)
# --- revert_linggloss -------------------------------------------------------
# Reverts the new Interlinear Gloss (2/3 Lines) flex insets of the
# linguistics module to the old covington commands in ERT: the inset's
# optional argument and its post: arguments are extracted, the insets are
# removed, and the content is reassembled as \gloss/\trigloss{...}{...}...
# plus a "Requires covington" local layout.
# NOTE(review): this numbered dump omits many control-flow lines (the early
# return, while-loop headers, "== -1" guards, continue statements, the
# initialization of the m*content/optargcontent defaults, and the assignment
# of `cmd`); the numbered lines below are kept verbatim.
1906 def revert_linggloss(document):
1907 " Revert to old ling gloss definitions "
1910 document.del_local_layout(gloss_inset_def)
1911 document.del_local_layout(glosss_inset_def)
1914 glosses = ["\\begin_inset Flex Interlinear Gloss (2 Lines)", "\\begin_inset Flex Interlinear Gloss (3 Lines)"]
1915 for glosse in glosses:
1918 i = find_token(document.body, glosse, i+1)
1921 j = find_end_of_inset(document.body, i)
1923 document.warning("Malformed LyX document: Can't find end of Gloss inset")
# Optional argument (Argument 1): translation text used as [..] option.
1926 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
1927 endarg = find_end_of_inset(document.body, arg)
1930 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1931 if argbeginPlain == -1:
1932 document.warning("Malformed LyX document: Can't find optarg plain Layout")
1934 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1935 optargcontent = document.body[argbeginPlain + 1 : argendPlain - 2]
1937 # remove Arg insets and paragraph, if it only contains this inset
1938 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1939 del document.body[arg - 1 : endarg + 4]
1941 del document.body[arg : endarg + 1]
# First mandatory gloss line (Argument post:1).
1943 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
1944 endarg = find_end_of_inset(document.body, arg)
1947 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1948 if argbeginPlain == -1:
1949 document.warning("Malformed LyX document: Can't find arg 1 plain Layout")
1951 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1952 marg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
1954 # remove Arg insets and paragraph, if it only contains this inset
1955 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1956 del document.body[arg - 1 : endarg + 4]
1958 del document.body[arg : endarg + 1]
# Second mandatory gloss line (Argument post:2).
1960 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
1961 endarg = find_end_of_inset(document.body, arg)
1964 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1965 if argbeginPlain == -1:
1966 document.warning("Malformed LyX document: Can't find arg 2 plain Layout")
1968 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1969 marg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
1971 # remove Arg insets and paragraph, if it only contains this inset
1972 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1973 del document.body[arg - 1 : endarg + 4]
1975 del document.body[arg : endarg + 1]
# Third gloss line (Argument post:3), only used by the 3-line variant.
1977 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
1978 endarg = find_end_of_inset(document.body, arg)
1981 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
1982 if argbeginPlain == -1:
1983 document.warning("Malformed LyX document: Can't find arg 3 plain Layout")
1985 argendPlain = find_end_of_inset(document.body, argbeginPlain)
1986 marg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
1988 # remove Arg insets and paragraph, if it only contains this inset
1989 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
1990 del document.body[arg - 1 : endarg + 4]
1992 del document.body[arg : endarg + 1]
# Choose the covington command (\gloss or \trigloss, set in elided lines).
1995 if glosse == "\\begin_inset Flex Interlinear Gloss (3 Lines)":
# Rebuild the inset content as ERT: \cmd[opt]{l1}{l2}[{l3}].
1998 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
1999 endInset = find_end_of_inset(document.body, i)
2000 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
2001 precontent = put_cmd_in_ert(cmd)
2002 if len(optargcontent) > 0:
2003 precontent += put_cmd_in_ert("[") + optargcontent + put_cmd_in_ert("]")
2004 precontent += put_cmd_in_ert("{")
2006 postcontent = put_cmd_in_ert("}{") + marg1content + put_cmd_in_ert("}{") + marg2content
2007 if cmd == "\\trigloss":
2008 postcontent += put_cmd_in_ert("}{") + marg3content
2009 postcontent += put_cmd_in_ert("}")
2011 document.body[endPlain:endInset + 1] = postcontent
2012 document.body[beginPlain + 1:beginPlain] = precontent
2013 del document.body[i : beginPlain + 1]
# The covington package is still required for the reverted commands.
2015 document.append_local_layout("Requires covington")
# Revert linguistics "Subexample" layouts that carry an Argument 1 inset to
# raw LaTeX: the argument content becomes the optional argument of
# \begin{subexamples}[...] emitted as ERT, each paragraph becomes \item, and
# the closing \end{subexamples} is appended.  Mutates document.body/header
# in place; returns None.
# NOTE(review): this is a numbered listing with gaps -- short guard/else/
# continue/return lines between the printed numbers are not visible here;
# confirm against the full source before changing any logic.
2020 def revert_subexarg(document):
2021 " Revert linguistic subexamples with argument to ERT "
2023 if not "linguistics" in document.get_module_list():
2029 i = find_token(document.body, "\\begin_layout Subexample", i+1)
2032 j = find_end_of_layout(document.body, i)
2034 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2037 # check for consecutive layouts
2038 k = find_token(document.body, "\\begin_layout", j)
2039 if k == -1 or document.body[k] != "\\begin_layout Subexample":
2041 j = find_end_of_layout(document.body, k)
2043 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2046 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2050 endarg = find_end_of_inset(document.body, arg)
2052 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2053 if argbeginPlain == -1:
2054 document.warning("Malformed LyX document: Can't find optarg plain Layout")
2056 argendPlain = find_end_of_inset(document.body, argbeginPlain)
# Extract the argument inset's content, converted to LaTeX for use inside [].
2057 optargcontent = lyx2latex(document, document.body[argbeginPlain + 1 : argendPlain - 2])
2059 # remove Arg insets and paragraph, if it only contains this inset
2060 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2061 del document.body[arg - 1 : endarg + 4]
2063 del document.body[arg : endarg + 1]
2065 cmd = put_cmd_in_ert("\\begin{subexamples}[" + optargcontent + "]")
2067 # re-find end of layout
2068 j = find_end_of_layout(document.body, i)
2070 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2073 # check for consecutive layouts
2074 k = find_token(document.body, "\\begin_layout", j)
2075 if k == -1 or document.body[k] != "\\begin_layout Subexample":
# Each consecutive Subexample paragraph is turned into Standard + ERT \item.
2077 document.body[k : k + 1] = ["\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
2078 j = find_end_of_layout(document.body, k)
2080 document.warning("Malformed LyX document: Can't find end of Subexample layout")
2083 endev = put_cmd_in_ert("\\end{subexamples}")
2085 document.body[j : j] = ["\\end_layout", "", "\\begin_layout Standard"] + endev
2086 document.body[i : i + 1] = ["\\begin_layout Standard"] + cmd \
2087 + ["\\end_layout", "", "\\begin_layout Standard"] + put_cmd_in_ert("\\item ")
# covington provides the subexamples environment.
2089 document.append_local_layout("Requires covington")
# Revert the linguistics DRS (Discourse Representation Structure) flex
# insets to ERT calls of the drs.sty commands (\drs, \sdrs, \ifdrs, \condrs,
# \qdrs, \negdrs, ...).  Pre-arguments (Argument 1/2) become braced groups
# before the main content; post-arguments (post:1..post:4) become braced
# groups after it, depending on the command.  Mutates document in place.
# NOTE(review): numbered listing with gaps -- guard/else/continue/return
# lines between the printed numbers are missing from this view.
2093 def revert_drs(document):
2094 " Revert DRS insets (linguistics) to ERT "
2096 if not "linguistics" in document.get_module_list():
2100 drses = ["\\begin_inset Flex DRS", "\\begin_inset Flex DRS*",
2101 "\\begin_inset Flex IfThen-DRS", "\\begin_inset Flex Cond-DRS",
2102 "\\begin_inset Flex QDRS", "\\begin_inset Flex NegDRS",
2103 "\\begin_inset Flex SDRS"]
2107 i = find_token(document.body, drs, i+1)
2110 j = find_end_of_inset(document.body, i)
2112 document.warning("Malformed LyX document: Can't find end of DRS inset")
2115 # Check for arguments
2116 arg = find_token(document.body, "\\begin_inset Argument 1", i, j)
2117 endarg = find_end_of_inset(document.body, arg)
2120 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2121 if argbeginPlain == -1:
2122 document.warning("Malformed LyX document: Can't find Argument 1 plain Layout")
2124 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2125 prearg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2127 # remove Arg insets and paragraph, if it only contains this inset
2128 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2129 del document.body[arg - 1 : endarg + 4]
2131 del document.body[arg : endarg + 1]
# Deleting lines invalidates j; the inset end is re-located after each
# argument extraction below.
2134 j = find_end_of_inset(document.body, i)
2136 document.warning("Malformed LyX document: Can't find end of DRS inset")
2139 arg = find_token(document.body, "\\begin_inset Argument 2", i, j)
2140 endarg = find_end_of_inset(document.body, arg)
2143 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2144 if argbeginPlain == -1:
2145 document.warning("Malformed LyX document: Can't find Argument 2 plain Layout")
2147 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2148 prearg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2150 # remove Arg insets and paragraph, if it only contains this inset
2151 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2152 del document.body[arg - 1 : endarg + 4]
2154 del document.body[arg : endarg + 1]
2157 j = find_end_of_inset(document.body, i)
2159 document.warning("Malformed LyX document: Can't find end of DRS inset")
2162 arg = find_token(document.body, "\\begin_inset Argument post:1", i, j)
2163 endarg = find_end_of_inset(document.body, arg)
2164 postarg1content = []
2166 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2167 if argbeginPlain == -1:
2168 document.warning("Malformed LyX document: Can't find Argument post:1 plain Layout")
2170 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2171 postarg1content = document.body[argbeginPlain + 1 : argendPlain - 2]
2173 # remove Arg insets and paragraph, if it only contains this inset
2174 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2175 del document.body[arg - 1 : endarg + 4]
2177 del document.body[arg : endarg + 1]
2180 j = find_end_of_inset(document.body, i)
2182 document.warning("Malformed LyX document: Can't find end of DRS inset")
2185 arg = find_token(document.body, "\\begin_inset Argument post:2", i, j)
2186 endarg = find_end_of_inset(document.body, arg)
2187 postarg2content = []
2189 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2190 if argbeginPlain == -1:
2191 document.warning("Malformed LyX document: Can't find Argument post:2 plain Layout")
2193 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2194 postarg2content = document.body[argbeginPlain + 1 : argendPlain - 2]
2196 # remove Arg insets and paragraph, if it only contains this inset
2197 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2198 del document.body[arg - 1 : endarg + 4]
2200 del document.body[arg : endarg + 1]
2203 j = find_end_of_inset(document.body, i)
2205 document.warning("Malformed LyX document: Can't find end of DRS inset")
2208 arg = find_token(document.body, "\\begin_inset Argument post:3", i, j)
2209 endarg = find_end_of_inset(document.body, arg)
2210 postarg3content = []
2212 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2213 if argbeginPlain == -1:
2214 document.warning("Malformed LyX document: Can't find Argument post:3 plain Layout")
2216 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2217 postarg3content = document.body[argbeginPlain + 1 : argendPlain - 2]
2219 # remove Arg insets and paragraph, if it only contains this inset
2220 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2221 del document.body[arg - 1 : endarg + 4]
2223 del document.body[arg : endarg + 1]
2226 j = find_end_of_inset(document.body, i)
2228 document.warning("Malformed LyX document: Can't find end of DRS inset")
2231 arg = find_token(document.body, "\\begin_inset Argument post:4", i, j)
2232 endarg = find_end_of_inset(document.body, arg)
2233 postarg4content = []
2235 argbeginPlain = find_token(document.body, "\\begin_layout Plain Layout", arg, endarg)
2236 if argbeginPlain == -1:
2237 document.warning("Malformed LyX document: Can't find Argument post:4 plain Layout")
2239 argendPlain = find_end_of_inset(document.body, argbeginPlain)
2240 postarg4content = document.body[argbeginPlain + 1 : argendPlain - 2]
2242 # remove Arg insets and paragraph, if it only contains this inset
2243 if document.body[arg - 1] == "\\begin_layout Plain Layout" and find_end_of_layout(document.body, arg - 1) == endarg + 3:
2244 del document.body[arg - 1 : endarg + 4]
2246 del document.body[arg : endarg + 1]
2248 # The respective LaTeX command
# Map each Flex inset variant to its drs.sty command (the assignments on
# the elided lines are not visible in this listing).
2250 if drs == "\\begin_inset Flex DRS*":
2252 elif drs == "\\begin_inset Flex IfThen-DRS":
2254 elif drs == "\\begin_inset Flex Cond-DRS":
2256 elif drs == "\\begin_inset Flex QDRS":
2258 elif drs == "\\begin_inset Flex NegDRS":
2260 elif drs == "\\begin_inset Flex SDRS":
2263 beginPlain = find_token(document.body, "\\begin_layout Plain Layout", i)
2264 endInset = find_end_of_inset(document.body, i)
2265 endPlain = find_token_backwards(document.body, "\\end_layout", endInset)
2266 precontent = put_cmd_in_ert(cmd)
2267 precontent += put_cmd_in_ert("{") + prearg1content + put_cmd_in_ert("}")
# \sdrs takes a second pre-argument.
2268 if drs == "\\begin_inset Flex SDRS":
2269 precontent += put_cmd_in_ert("{") + prearg2content + put_cmd_in_ert("}")
2270 precontent += put_cmd_in_ert("{")
2273 if cmd == "\\qdrs" or cmd == "\\condrs" or cmd == "\\ifdrs":
2274 postcontent = put_cmd_in_ert("}{") + postarg1content + put_cmd_in_ert("}{") + postarg2content + put_cmd_in_ert("}")
2275 if cmd == "\\condrs" or cmd == "\\qdrs":
2276 postcontent += put_cmd_in_ert("{") + postarg3content + put_cmd_in_ert("}")
2278 postcontent += put_cmd_in_ert("{") + postarg4content + put_cmd_in_ert("}")
2280 postcontent = put_cmd_in_ert("}")
# Splice: replace inset tail with postcontent, then prepend precontent,
# then drop the inset header lines.
2282 document.body[endPlain:endInset + 1] = postcontent
2283 document.body[beginPlain + 1:beginPlain] = precontent
2284 del document.body[i : beginPlain + 1]
2286 document.append_local_layout("Provides covington 1")
2287 add_to_preamble(document, ["\\usepackage{drs,covington}"])
# Revert \babelfont-based font selection: only applies to documents using
# non-TeX fonts with the babel language package.  Resets \font_roman/_sans/
# _typewriter headers to "default" and emits equivalent \babelfont commands
# (with Scale/Mapping options) into the user preamble.  Returns None.
# NOTE(review): numbered listing with gaps -- 'if j == -1:'/'return'/'else:'
# lines between the printed numbers are missing from this view.
2293 def revert_babelfont(document):
2294 " Reverts the use of \\babelfont to user preamble "
2296 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2298 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2300 if not str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2302 i = find_token(document.header, '\\language_package', 0)
2304 document.warning("Malformed LyX document: Missing \\language_package.")
2306 if get_value(document.header, "\\language_package", 0) != "babel":
2309 # check font settings
2310 # defaults
2311 roman = sans = typew = "default"
2313 sf_scale = tt_scale = 100.0
2315 j = find_token(document.header, "\\font_roman", 0)
2317 document.warning("Malformed LyX document: Missing \\font_roman.")
2319 # We need to use this regex since split() does not handle quote protection
2320 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2321 roman = romanfont[2].strip('"')
2322 romanfont[2] = '"default"'
2323 document.header[j] = " ".join(romanfont)
2325 j = find_token(document.header, "\\font_sans", 0)
2327 document.warning("Malformed LyX document: Missing \\font_sans.")
2329 # We need to use this regex since split() does not handle quote protection
2330 sansfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2331 sans = sansfont[2].strip('"')
2332 sansfont[2] = '"default"'
2333 document.header[j] = " ".join(sansfont)
2335 j = find_token(document.header, "\\font_typewriter", 0)
2337 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2339 # We need to use this regex since split() does not handle quote protection
2340 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2341 typew = ttfont[2].strip('"')
2342 ttfont[2] = '"default"'
2343 document.header[j] = " ".join(ttfont)
2345 i = find_token(document.header, "\\font_osf", 0)
2347 document.warning("Malformed LyX document: Missing \\font_osf.")
2349 osf = str2bool(get_value(document.header, "\\font_osf", i))
2351 j = find_token(document.header, "\\font_sf_scale", 0)
2353 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2355 sfscale = document.header[j].split()
# The scale value itself is taken from an elided line; on parse failure the
# default of 100.0 is kept and a warning is issued.
2358 document.header[j] = " ".join(sfscale)
2361 sf_scale = float(val)
2363 document.warning("Invalid font_sf_scale value: " + val)
2365 j = find_token(document.header, "\\font_tt_scale", 0)
2367 document.warning("Malformed LyX document: Missing \\font_tt_scale.")
2369 ttscale = document.header[j].split()
2372 document.header[j] = " ".join(ttscale)
2375 tt_scale = float(val)
2377 document.warning("Invalid font_tt_scale value: " + val)
2379 # set preamble stuff
2380 pretext = ['%% This document must be processed with xelatex or lualatex!']
2381 pretext.append('\\AtBeginDocument{%')
2382 if roman != "default":
2383 pretext.append('\\babelfont{rm}[Mapping=tex-text]{' + roman + '}')
2384 if sans != "default":
2385 sf = '\\babelfont{sf}['
2386 if sf_scale != 100.0:
2387 sf += 'Scale=' + str(sf_scale / 100.0) + ','
2388 sf += 'Mapping=tex-text]{' + sans + '}'
2390 if typew != "default":
2391 tw = '\\babelfont{tt}'
2392 if tt_scale != 100.0:
2393 tw += '[Scale=' + str(tt_scale / 100.0) + ']'
2394 tw += '{' + typew + '}'
# OSF is mapped to a global fontspec default.
2397 pretext.append('\\defaultfontfeatures{Numbers=OldStyle}')
2399 insert_to_preamble(document, pretext)
# Revert a native MinionPro roman font with extra options (stored in
# \font_roman_opts) to a \usepackage[...]{MinionPro} preamble line.
# Only applies to TeX-font documents; also folds \font_osf into the
# package options and removes the opts header line.  Returns None.
# NOTE(review): numbered listing with gaps -- guard/return/else lines
# between the printed numbers are missing from this view.
2402 def revert_minionpro(document):
2403 " Revert native MinionPro font definition (with extra options) to LaTeX "
2405 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2407 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2409 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2412 regexp = re.compile(r'(\\font_roman_opts)')
2413 x = find_re(document.header, regexp, 0)
2417 # We need to use this regex since split() does not handle quote protection
2418 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2419 opts = romanopts[1].strip('"')
2421 i = find_token(document.header, "\\font_roman", 0)
2423 document.warning("Malformed LyX document: Missing \\font_roman.")
2426 # We need to use this regex since split() does not handle quote protection
2427 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2428 roman = romanfont[1].strip('"')
2429 if roman != "minionpro":
2431 romanfont[1] = '"default"'
2432 document.header[i] = " ".join(romanfont)
2434 j = find_token(document.header, "\\font_osf true", 0)
2437 preamble = "\\usepackage["
# The osf flag is folded into the package options (elided lines) and the
# header flag is reset so it is not applied twice.
2439 document.header[j] = "\\font_osf false"
2443 preamble += "]{MinionPro}"
2444 add_to_preamble(document, [preamble])
# Drop the now-reverted \font_roman_opts header line.
2445 del document.header[x]
# Revert the \font_*_opts header lines: for each of roman/sans/typewriter,
# if options are present, reset the font header to "default" and emit the
# equivalent preamble command -- \babelfont{rm|sf|tt}[opts...] when babel is
# used with non-TeX fonts, otherwise \setmainfont/\setsansfont/\setmonofont.
# Scale values from \font_sf_scale / \font_tt_scale are folded in.
# NOTE(review): numbered listing with gaps -- guard/else/return lines
# between the printed numbers are missing from this view.
2448 def revert_font_opts(document):
2449 " revert font options by outputting \\setxxxfont or \\babelfont to the preamble "
2451 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2453 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2455 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2456 i = find_token(document.header, '\\language_package', 0)
2458 document.warning("Malformed LyX document: Missing \\language_package.")
2460 Babel = (get_value(document.header, "\\language_package", 0) == "babel")
2462 # 1. Roman
2463 regexp = re.compile(r'(\\font_roman_opts)')
2464 i = find_re(document.header, regexp, 0)
2466 # We need to use this regex since split() does not handle quote protection
2467 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2468 opts = romanopts[1].strip('"')
2469 del document.header[i]
2471 regexp = re.compile(r'(\\font_roman)')
2472 i = find_re(document.header, regexp, 0)
2474 # We need to use this regex since split() does not handle quote protection
2475 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2476 font = romanfont[2].strip('"')
2477 romanfont[2] = '"default"'
2478 document.header[i] = " ".join(romanfont)
2479 if font != "default":
# Babel documents use \babelfont, plain fontspec documents \setmainfont.
2481 preamble = "\\babelfont{rm}["
2483 preamble = "\\setmainfont["
2486 preamble += "Mapping=tex-text]{"
2489 add_to_preamble(document, [preamble])
2491 # 2. Sans
2492 regexp = re.compile(r'(\\font_sans_opts)')
2493 i = find_re(document.header, regexp, 0)
2496 # We need to use this regex since split() does not handle quote protection
2497 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2498 opts = sfopts[1].strip('"')
2499 del document.header[i]
2501 regexp = re.compile(r'(\\font_sf_scale)')
2502 i = find_re(document.header, regexp, 0)
# \font_sf_scale holds two values; index [1] is the non-TeX-fonts scale.
2504 scaleval = get_value(document.header, "\\font_sf_scale" , i).split()[1]
2505 regexp = re.compile(r'(\\font_sans)')
2506 i = find_re(document.header, regexp, 0)
2508 # We need to use this regex since split() does not handle quote protection
2509 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2510 font = sffont[2].strip('"')
2511 sffont[2] = '"default"'
2512 document.header[i] = " ".join(sffont)
2513 if font != "default":
2515 preamble = "\\babelfont{sf}["
2517 preamble = "\\setsansfont["
# A scale below 100% is expressed as Scale=0.<value> (elided condition).
2521 preamble += "Scale=0."
2522 preamble += scaleval
2524 preamble += "Mapping=tex-text]{"
2527 add_to_preamble(document, [preamble])
2529 # 3. Typewriter
2530 regexp = re.compile(r'(\\font_typewriter_opts)')
2531 i = find_re(document.header, regexp, 0)
2534 # We need to use this regex since split() does not handle quote protection
2535 ttopts = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2536 opts = ttopts[1].strip('"')
2537 del document.header[i]
2539 regexp = re.compile(r'(\\font_tt_scale)')
2540 i = find_re(document.header, regexp, 0)
2542 scaleval = get_value(document.header, "\\font_tt_scale" , i).split()[1]
2543 regexp = re.compile(r'(\\font_typewriter)')
2544 i = find_re(document.header, regexp, 0)
2546 # We need to use this regex since split() does not handle quote protection
2547 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2548 font = ttfont[2].strip('"')
2549 ttfont[2] = '"default"'
2550 document.header[i] = " ".join(ttfont)
2551 if font != "default":
2553 preamble = "\\babelfont{tt}["
2555 preamble = "\\setmonofont["
2559 preamble += "Scale=0."
2560 preamble += scaleval
2562 preamble += "Mapping=tex-text]{"
2565 add_to_preamble(document, [preamble])
# Revert a "complete" plain Noto TeX-font setup (roman NotoSerif-TLF with
# default sans/typewriter) that carries extra options and/or osf to a single
# \usepackage[...]{noto} preamble line, resetting the roman header to
# "default".  Returns None.
# NOTE(review): numbered listing with gaps -- 'if x == -1:'/'return'/'osf ='
# lines between the printed numbers are missing from this view.
2568 def revert_plainNotoFonts_xopts(document):
2569 " Revert native (straight) Noto font definition (with extra options) to LaTeX "
2571 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2573 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2575 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2579 y = find_token(document.header, "\\font_osf true", 0)
2583 regexp = re.compile(r'(\\font_roman_opts)')
2584 x = find_re(document.header, regexp, 0)
# Nothing to revert when neither extra options nor osf are present.
2585 if x == -1 and not osf:
2590 # We need to use this regex since split() does not handle quote protection
2591 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2592 opts = romanopts[1].strip('"')
2598 i = find_token(document.header, "\\font_roman", 0)
2602 # We need to use this regex since split() does not handle quote protection
2603 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2604 roman = romanfont[1].strip('"')
2605 if roman != "NotoSerif-TLF":
2608 j = find_token(document.header, "\\font_sans", 0)
2612 # We need to use this regex since split() does not handle quote protection
2613 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2614 sf = sffont[1].strip('"')
2618 j = find_token(document.header, "\\font_typewriter", 0)
2622 # We need to use this regex since split() does not handle quote protection
2623 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[j])
2624 tt = ttfont[1].strip('"')
2628 # So we have noto as "complete font"
2629 romanfont[1] = '"default"'
2630 document.header[i] = " ".join(romanfont)
2632 preamble = "\\usepackage["
2634 preamble += "]{noto}"
2635 add_to_preamble(document, [preamble])
# Reset the osf flag and drop the opts line once they are in the preamble.
2637 document.header[y] = "\\font_osf false"
2639 del document.header[x]
# Revert the extended Noto font family (with extra options) via the shared
# font-mapping machinery; collected packages/options land in the preamble
# through add_preamble_fonts().  TeX fonts only.
# NOTE(review): numbered listing with gaps -- the 'if i == -1:'/'return'
# guards and the fontmap initialization are missing from this view.
2642 def revert_notoFonts_xopts(document):
2643 " Revert native (extended) Noto font definition (with extra options) to LaTeX "
2645 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2647 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2649 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2653 fm = createFontMapping(['Noto'])
2654 if revert_fonts(document, fm, fontmap, True):
2655 add_preamble_fonts(document, fontmap)
# Revert the IBM Plex font family (with extra options) via the shared
# font-mapping machinery.  TeX fonts only; mirrors revert_notoFonts_xopts.
# NOTE(review): numbered listing with gaps -- the 'if i == -1:'/'return'
# guards and the fontmap initialization are missing from this view.
2658 def revert_IBMFonts_xopts(document):
2659 " Revert native IBM font definition (with extra options) to LaTeX "
2661 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2663 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2665 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2669 fm = createFontMapping(['IBM'])
2671 if revert_fonts(document, fm, fontmap, True):
2672 add_preamble_fonts(document, fontmap)
# Revert the Adobe Source font family (with extra options) via the shared
# font-mapping machinery.  TeX fonts only; mirrors revert_notoFonts_xopts.
# NOTE(review): numbered listing with gaps -- the 'if i == -1:'/'return'
# guards and the fontmap initialization are missing from this view.
2675 def revert_AdobeFonts_xopts(document):
2676 " Revert native Adobe font definition (with extra options) to LaTeX "
2678 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2680 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2682 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
2686 fm = createFontMapping(['Adobe'])
2688 if revert_fonts(document, fm, fontmap, True):
2689 add_preamble_fonts(document, fontmap)
# Split the single \font_osf header flag into the new per-family flags
# \font_roman_osf / \font_sans_osf / \font_typewriter_osf.  For non-TeX
# fonts the sans/typewriter flags start out false; for TeX fonts they are
# derived from whether the selected sans/typewriter font is one of the
# known osf-capable families (osfsf/osftt lists).
# NOTE(review): numbered listing with gaps -- guard/else/return lines
# between the printed numbers are missing from this view.
2692 def convert_osf(document):
2693 " Convert \\font_osf param to new format "
2696 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2698 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2700 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2702 i = find_token(document.header, '\\font_osf', 0)
2704 document.warning("Malformed LyX document: Missing \\font_osf.")
# TeX-font families whose osf flag is meaningful for sans / typewriter.
2707 osfsf = ["biolinum", "ADOBESourceSansPro", "NotoSansRegular", "NotoSansMedium", "NotoSansThin", "NotoSansLight", "NotoSansExtralight" ]
2708 osftt = ["ADOBESourceCodePro", "NotoMonoRegular" ]
2710 osfval = str2bool(get_value(document.header, "\\font_osf", i))
2711 document.header[i] = document.header[i].replace("\\font_osf", "\\font_roman_osf")
# Non-TeX fonts: per-family osf starts disabled.
2714 document.header.insert(i, "\\font_sans_osf false")
2715 document.header.insert(i + 1, "\\font_typewriter_osf false")
2719 x = find_token(document.header, "\\font_sans", 0)
2721 document.warning("Malformed LyX document: Missing \\font_sans.")
2723 # We need to use this regex since split() does not handle quote protection
2724 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2725 sf = sffont[1].strip('"')
2727 document.header.insert(i, "\\font_sans_osf true")
2729 document.header.insert(i, "\\font_sans_osf false")
2731 x = find_token(document.header, "\\font_typewriter", 0)
2733 document.warning("Malformed LyX document: Missing \\font_typewriter.")
2735 # We need to use this regex since split() does not handle quote protection
2736 ttfont = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2737 tt = ttfont[1].strip('"')
2739 document.header.insert(i + 1, "\\font_typewriter_osf true")
2741 document.header.insert(i + 1, "\\font_typewriter_osf false")
2744 document.header.insert(i, "\\font_sans_osf false")
2745 document.header.insert(i + 1, "\\font_typewriter_osf false")
# Collapse the per-family \font_roman_osf / \font_sans_osf /
# \font_typewriter_osf flags back into the single legacy \font_osf flag;
# the sans/typewriter lines are removed and (for non-TeX fonts) OR-ed into
# the resulting value.  Returns None.
# NOTE(review): numbered listing with gaps -- guard/return lines between
# the printed numbers are missing from this view.
2748 def revert_osf(document):
2749 " Revert \\font_*_osf params "
2752 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2754 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2756 NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))
2758 i = find_token(document.header, '\\font_roman_osf', 0)
2760 document.warning("Malformed LyX document: Missing \\font_roman_osf.")
2763 osfval = str2bool(get_value(document.header, "\\font_roman_osf", i))
2764 document.header[i] = document.header[i].replace("\\font_roman_osf", "\\font_osf")
2766 i = find_token(document.header, '\\font_sans_osf', 0)
2768 document.warning("Malformed LyX document: Missing \\font_sans_osf.")
2771 osfval = str2bool(get_value(document.header, "\\font_sans_osf", i))
2772 del document.header[i]
2774 i = find_token(document.header, '\\font_typewriter_osf', 0)
2776 document.warning("Malformed LyX document: Missing \\font_typewriter_osf.")
# Any family with osf set keeps the legacy flag on.
2779 osfval |= str2bool(get_value(document.header, "\\font_typewriter_osf", i))
2780 del document.header[i]
2783 i = find_token(document.header, '\\font_osf', 0)
2785 document.warning("Malformed LyX document: Missing \\font_osf.")
2787 document.header[i] = "\\font_osf true"
# Revert TeX fonts that carry extra options (\font_sans_opts /
# \font_roman_opts) to explicit \usepackage lines: biolinum for sans
# (folding in osf and a scaled= factor), and a set of known roman families
# (mathdesign/utopia, mathpazo, mathptmx, XCharter, ...) folding in osf and
# small-caps options where the package supports them.  TeX fonts only.
# NOTE(review): numbered listing with gaps -- guard/else/return lines and
# several option assignments between the printed numbers are missing from
# this view; confirm against the full source before changing logic.
2790 def revert_texfontopts(document):
2791 " Revert native TeX font definitions (with extra options) to LaTeX "
2793 i = find_token(document.header, '\\use_non_tex_fonts', 0)
2795 document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
2797 if str2bool(get_value(document.header, "\\use_non_tex_fonts", i)):
# Roman families that are handled by this revert routine.
2800 rmfonts = ["ccfonts", "cochineal", "utopia", "garamondx", "libertine", "lmodern", "palatino", "times", "xcharter" ]
2802 # First the sf (biolinum only)
2803 regexp = re.compile(r'(\\font_sans_opts)')
2804 x = find_re(document.header, regexp, 0)
2806 # We need to use this regex since split() does not handle quote protection
2807 sfopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2808 opts = sfopts[1].strip('"')
2809 i = find_token(document.header, "\\font_sans", 0)
2811 document.warning("Malformed LyX document: Missing \\font_sans.")
2813 # We need to use this regex since split() does not handle quote protection
2814 sffont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2815 sans = sffont[1].strip('"')
2816 if sans == "biolinum":
2818 sffont[1] = '"default"'
2819 document.header[i] = " ".join(sffont)
2821 j = find_token(document.header, "\\font_sans_osf true", 0)
2824 k = find_token(document.header, "\\font_sf_scale", 0)
2826 document.warning("Malformed LyX document: Missing \\font_sf_scale.")
2828 sfscale = document.header[k].split()
2831 document.header[k] = " ".join(sfscale)
2834 sf_scale = float(val)
2836 document.warning("Invalid font_sf_scale value: " + val)
2837 preamble = "\\usepackage["
2839 document.header[j] = "\\font_sans_osf false"
2841 if sf_scale != 100.0:
2842 preamble += 'scaled=' + str(sf_scale / 100.0) + ','
2844 preamble += "]{biolinum}"
2845 add_to_preamble(document, [preamble])
2846 del document.header[x]
2848 regexp = re.compile(r'(\\font_roman_opts)')
2849 x = find_re(document.header, regexp, 0)
2853 # We need to use this regex since split() does not handle quote protection
2854 romanopts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
2855 opts = romanopts[1].strip('"')
2857 i = find_token(document.header, "\\font_roman", 0)
2859 document.warning("Malformed LyX document: Missing \\font_roman.")
2862 # We need to use this regex since split() does not handle quote protection
2863 romanfont = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
2864 roman = romanfont[1].strip('"')
2865 if not roman in rmfonts:
2867 romanfont[1] = '"default"'
2868 document.header[i] = " ".join(romanfont)
# Some families map to a differently-named LaTeX package.
2870 if roman == "utopia":
2872 elif roman == "palatino":
2873 package = "mathpazo"
2874 elif roman == "times":
2875 package = "mathptmx"
2876 elif roman == "xcharter":
2877 package = "XCharter"
2879 j = find_token(document.header, "\\font_roman_osf true", 0)
# Per-family osf option strings (several assignments are on elided lines).
2881 if roman == "cochineal":
2882 osf = "proportional,osf,"
2883 elif roman == "utopia":
2885 elif roman == "garamondx":
2887 elif roman == "libertine":
2889 elif roman == "palatino":
2891 elif roman == "xcharter":
2893 document.header[j] = "\\font_roman_osf false"
2894 k = find_token(document.header, "\\font_sc true", 0)
2896 if roman == "utopia":
2898 if roman == "palatino" and osf == "":
2900 document.header[k] = "\\font_sc false"
2901 preamble = "\\usepackage["
2904 preamble += "]{" + package + "}"
2905 add_to_preamble(document, [preamble])
2906 del document.header[x]
# Convert a preamble-based Cantarell font setup to native LyX font settings
# ("oldstyle" names the font-option keyword handled by convert_fonts).
# Applies to TeX-font documents only.
2909 def convert_CantarellFont(document):
2910 " Handle Cantarell font definition to LaTeX "
2912 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2913 fm = createFontMapping(['Cantarell'])
2914 convert_fonts(document, fm, "oldstyle")
# Revert native Cantarell font settings back to preamble packages via the
# shared font-mapping machinery.  TeX-font documents only.
# NOTE(review): numbered listing with gaps -- the fontmap initialization
# line is missing from this view.
2916 def revert_CantarellFont(document):
2917 " Revert native Cantarell font definition to LaTeX "
2919 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2921 fm = createFontMapping(['Cantarell'])
2922 if revert_fonts(document, fm, fontmap, False, True):
2923 add_preamble_fonts(document, fontmap)
# Convert a preamble-based Chivo font setup to native LyX font settings.
# Mirrors convert_CantarellFont; TeX-font documents only.
2925 def convert_ChivoFont(document):
2926 " Handle Chivo font definition to LaTeX "
2928 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2929 fm = createFontMapping(['Chivo'])
2930 convert_fonts(document, fm, "oldstyle")
# Revert native Chivo font settings back to preamble packages.
# Mirrors revert_CantarellFont; TeX-font documents only.
# NOTE(review): numbered listing with gaps -- the fontmap initialization
# line is missing from this view.
2932 def revert_ChivoFont(document):
2933 " Revert native Chivo font definition to LaTeX "
2935 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2937 fm = createFontMapping(['Chivo'])
2938 if revert_fonts(document, fm, fontmap, False, True):
2939 add_preamble_fonts(document, fontmap)
# Convert a preamble-based Fira font setup to native LyX font settings
# ("lf" = lining-figures option keyword, unlike the "oldstyle" used for
# Cantarell/Chivo).  TeX-font documents only.
2942 def convert_FiraFont(document):
2943 " Handle Fira font definition to LaTeX "
2945 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2946 fm = createFontMapping(['Fira'])
2947 convert_fonts(document, fm, "lf")
# Revert native Fira font settings back to preamble packages.
# Mirrors revert_CantarellFont; TeX-font documents only.
# NOTE(review): numbered listing with gaps -- the fontmap initialization
# line is missing from this view.
2949 def revert_FiraFont(document):
2950 " Revert native Fira font definition to LaTeX "
2952 if find_token(document.header, "\\use_non_tex_fonts false", 0) != -1:
2954 fm = createFontMapping(['Fira'])
2955 if revert_fonts(document, fm, fontmap, False, True):
2956 add_preamble_fonts(document, fontmap)
def _convert_semibold_font(document, fonttag, semibold, regular, optstag,
                           scaletag, nontexfonts):
    """Replace one dedicated IBM Plex semibold font by the regular family.

    If the header line *fonttag* names the font *semibold*, rewrite it to
    *regular* and (for TeX fonts only) record "semibold" in the *optstag*
    header line, inserting that line before *scaletag* if it is missing.
    """
    i = find_token(document.header, fonttag, 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing %s." % fonttag)
        return
    # We need to use this regex since split() does not handle quote protection
    font = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
    if font[1].strip('"') != semibold:
        return
    font[1] = '"' + regular + '"'
    document.header[i] = " ".join(font)

    # The *_opts tags only exist for TeX fonts.
    if nontexfonts:
        return
    x = find_re(document.header, re.compile("(%s)" % re.escape(optstag)), 0)
    if x == -1:
        # Sensible place to insert tag
        fo = find_token(document.header, scaletag)
        if fo == -1:
            document.warning("Malformed LyX document! Missing %s" % scaletag)
        else:
            document.header.insert(fo, optstag + " \"semibold\"")
    else:
        # Prepend "semibold" to the already existing options
        # (quote-protecting regex again instead of split()).
        opts = re.findall(r'[^"\s]\S*|".+?"', document.header[x])
        document.header[x] = optstag + " \"semibold, " + opts[1].strip('"') + "\""


def convert_Semibolds(document):
    " Move semibold options to extraopts "

    NonTeXFonts = False
    i = find_token(document.header, '\\use_non_tex_fonts', 0)
    if i == -1:
        document.warning("Malformed LyX document: Missing \\use_non_tex_fonts.")
    else:
        NonTeXFonts = str2bool(get_value(document.header, "\\use_non_tex_fonts", i))

    _convert_semibold_font(document, "\\font_roman", "IBMPlexSerifSemibold",
                           "IBMPlexSerif", "\\font_roman_opts",
                           "\\font_sf_scale", NonTeXFonts)
    _convert_semibold_font(document, "\\font_sans", "IBMPlexSansSemibold",
                           "IBMPlexSans", "\\font_sans_opts",
                           "\\font_sf_scale", NonTeXFonts)
    # fix: the typewriter branch previously wrote the *sans* options
    # (sfopts) into \font_typewriter_opts; it now uses its own options.
    _convert_semibold_font(document, "\\font_typewriter", "IBMPlexMonoSemibold",
                           "IBMPlexMono", "\\font_typewriter_opts",
                           "\\font_tt_scale", NonTeXFonts)
def convert_NotoRegulars(document):
    " Merge diverse noto regular fonts "

    # Map each font header tag to the old dedicated name and its replacement.
    replacements = (
        ("\\font_roman", "NotoSerif-TLF", "NotoSerifRegular"),
        ("\\font_sans", "NotoSans-TLF", "NotoSansRegular"),
        ("\\font_typewriter", "NotoMono-TLF", "NotoMonoRegular"),
    )
    for fonttag, oldname, newname in replacements:
        i = find_token(document.header, fonttag, 0)
        if i == -1:
            document.warning("Malformed LyX document: Missing %s." % fonttag)
            continue
        # We need to use this regex since split() does not handle quote protection
        fontspec = re.findall(r'[^"\s]\S*|".+?"', document.header[i])
        if fontspec[1].strip('"') == oldname:
            fontspec[1] = '"' + newname + '"'
            document.header[i] = " ".join(fontspec)
supported_versions = ["2.4.0", "2.4"]

# One entry per file-format step; an empty list means the step needs no
# content conversion.  (Empty placeholder entries reconstructed — the
# extraction dropped them; counts match the original line numbering.)
convert = [
           [545, [convert_lst_literalparam]],
           [546, []],
           [547, []],
           [548, []],
           [549, []],
           [550, [convert_fontenc]],
           [551, []],
           [552, []],
           [553, []],
           [554, []],
           [555, []],
           [556, []],
           [557, [convert_vcsinfo]],
           [558, [removeFrontMatterStyles]],
           [559, []],
           [560, []],
           [561, [convert_latexFonts]],  # Handle dejavu, ibmplex fonts in GUI
           [562, []],
           [563, []],
           [564, []],
           [565, [convert_AdobeFonts]],  # Handle adobe fonts in GUI
           [566, [convert_hebrew_parentheses]],
           [567, []],
           [568, []],
           [569, []],
           [570, []],
           [571, []],
           [572, [convert_notoFonts]],  # Added options thin, light, extralight for Noto
           [573, [convert_inputencoding_namechange]],
           [574, [convert_ruby_module, convert_utf8_japanese]],
           [575, [convert_lineno]],
           [576, []],
           [577, [convert_linggloss]],
           [578, []],
           [579, []],
           [580, []],
           [581, [convert_osf]],
           [582, [convert_AdobeFonts, convert_latexFonts, convert_notoFonts,
                  convert_CantarellFont, convert_FiraFont]],  # old fonts re-converted due to extra options
           [583, [convert_ChivoFont, convert_Semibolds, convert_NotoRegulars]],
          ]

revert = [
          [582, [revert_ChivoFont]],
          [581, [revert_CantarellFont, revert_FiraFont]],
          [580, [revert_texfontopts, revert_osf]],
          [579, [revert_minionpro, revert_plainNotoFonts_xopts,
                 revert_notoFonts_xopts, revert_IBMFonts_xopts,
                 revert_AdobeFonts_xopts,
                 revert_font_opts]],  # keep revert_font_opts last!
          [578, [revert_babelfont]],
          [577, [revert_drs]],
          [576, [revert_linggloss, revert_subexarg]],
          [575, [revert_new_languages]],
          [574, [revert_lineno]],
          [573, [revert_ruby_module, revert_utf8_japanese]],
          [572, [revert_inputencoding_namechange]],
          [571, [revert_notoFonts]],
          [570, [revert_cmidruletrimming]],
          [569, [revert_bibfileencodings]],
          [568, [revert_tablestyle]],
          [567, [revert_soul]],
          [566, [revert_malayalam]],
          [565, [revert_hebrew_parentheses]],
          [564, [revert_AdobeFonts]],
          [563, [revert_lformatinfo]],
          [562, [revert_listpargs]],
          [561, [revert_l7ninfo]],
          [560, [revert_latexFonts]],  # Handle dejavu, ibmplex fonts in user preamble
          [559, [revert_timeinfo, revert_namenoextinfo]],
          [558, [revert_dateinfo]],
          [557, [addFrontMatterStyles]],
          [556, [revert_vcsinfo]],
          [555, [revert_bibencoding]],
          [554, [revert_vcolumns]],
          [553, [revert_stretchcolumn]],
          [552, [revert_tuftecite]],
          [551, [revert_floatpclass, revert_floatalignment]],
          [550, [revert_nospellcheck]],
          [549, [revert_fontenc]],
          [548, []],  # dummy format change
          [547, [revert_lscape]],
          [546, [revert_xcharter]],
          [545, [revert_paratype]],
          [544, [revert_lst_literalparam]]
         ]
3174 if __name__ == "__main__":